repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
sarpy | sarpy-master/sarpy/io/DEM/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/consistency/sicd_consistency.py | """
A module for performing a selection of validation checks on a SICD (nitf) file,
or the xml file containing the sicd structure.
Use the `check_file` function directly, or perform using the command line
>>> python -m sarpy.consistency.sicd_consistency <file_name>
For more information, about command line usage, see
>>> python -m sarpy.consistency.sicd_consistency --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import sys
import argparse
import os
from typing import Union
from sarpy.io.xml.base import parse_xml_from_string, validate_xml_from_string
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, \
DataExtensionHeader0
from sarpy.io.complex.sicd import SICDReader, SICDDetails, extract_clas
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_schema import get_urn_details, get_schema_path, \
get_specification_identifier
logger = logging.getLogger('validation')
def evaluate_xml_versus_schema(xml_string, urn_string):
    """
    Check validity of the xml string versus the appropriate schema.

    Parameters
    ----------
    xml_string : str|bytes
    urn_string : str

    Returns
    -------
    None|bool
        The validation result, or `None` when schema validation is unavailable.
    """
    # resolve the schema location for the given urn; an unmapped urn is a failure
    try:
        schema_location = get_schema_path(urn_string)
    except KeyError:
        logger.exception('SICD: Failed getting the schema for urn {}'.format(urn_string))
        return False

    # delegate the actual validation; ImportError indicates the validation
    # backend (lxml) is unavailable, which is reported as indeterminate (None)
    try:
        return validate_xml_from_string(xml_string, schema_location, output_logger=logger)
    except ImportError:
        return None
def _evaluate_xml_string_validity(xml_string):
    """
    Check the validity of the SICD xml, as defined by the given string.

    Parameters
    ----------
    xml_string : str|bytes

    Returns
    -------
    (bool, str, SICDType)
        Tuple of (overall validity, sicd urn string, parsed SICD structure).

    Raises
    ------
    ValueError
        If no namespace is present in the xml, or no default namespace can
        be determined from it.
    """
    root_node, xml_ns = parse_xml_from_string(xml_string)
    if xml_ns is None:
        raise ValueError(
            'SICD XML invalid, because no apparent namespace defined in the xml,\n\t'
            'which starts `{}...`'.format(xml_string[:15]))
    if 'default' not in xml_ns:
        raise ValueError(
            'Could not properly interpret the namespace collection from xml\n{}'.format(xml_ns))

    sicd_urn = xml_ns['default']
    # check that our urn is mapped
    try:
        _ = get_urn_details(sicd_urn)
        check_schema = True
    except Exception:
        # unused `as e` binding removed - logger.exception already records the traceback
        logger.exception('SICD: The SICD namespace has unrecognized value')
        check_schema = False

    valid_xml = None
    if check_schema:
        valid_xml = evaluate_xml_versus_schema(xml_string, sicd_urn)
    if valid_xml is None:
        # schema validation could not be performed - do not count that against validity
        valid_xml = True

    # perform the various sicd structure checks
    the_sicd = SICDType.from_node(root_node, xml_ns=xml_ns)
    valid_sicd_contents = the_sicd.is_valid(recursive=True, stack=False)
    # logical `and` replaces the original bitwise `&` - idiomatic for booleans
    return valid_xml and valid_sicd_contents, sicd_urn, the_sicd
def check_sicd_data_extension(nitf_details, des_header, xml_string):
    """
    Evaluate a SICD data extension for validity.

    Parameters
    ----------
    nitf_details : NITFDetails
    des_header : DataExtensionHeader|DataExtensionHeader0
    xml_string : str|bytes

    Returns
    -------
    (bool, SICDType)
    """

    # NOTE: the nested closures below read `xml_urn` and `the_sicd`, which are
    # bound in the outer scope (at the bottom of this function) before the
    # closures are invoked.

    def check_des_header_fields():
        # type: () -> bool
        # NITF 02.10 names the field DESID; earlier versions use DESTAG
        des_id = des_header.DESID.strip() if nitf_details.nitf_version == '02.10' else des_header.DESTAG.strip()
        if des_id != 'XML_DATA_CONTENT':
            # old style DES (e.g. DESICD_XML) carries no user header to verify
            logger.warning('SICD: Found old style SICD DES Header. This is deprecated.')
            return True
        # make sure that the NITF urn is evaluated for sensibility
        nitf_urn = des_header.UserHeader.DESSHTN.strip()
        try:
            nitf_urn_details = get_urn_details(nitf_urn)
        except Exception:
            logger.exception('SICD: The SICD DES.DESSHTN must be a recognized urn')
            return False
        # make sure that the NITF urn and SICD urn actually agree
        header_good = True
        if nitf_urn != xml_urn:
            logger.error('SICD: The SICD DES.DESSHTN ({}) and urn ({}) must agree'.format(nitf_urn, xml_urn))
            header_good = False
        # make sure that the NITF DES fields are populated appropriately for NITF urn
        if des_header.UserHeader.DESSHSI.strip() != get_specification_identifier():
            logger.error(
                'SICD: DES.DESSHSI has value `{}`,\n\tbut should have value `{}`'.format(
                    des_header.UserHeader.DESSHSI.strip(), get_specification_identifier()))
            header_good = False
        nitf_version = nitf_urn_details['version']
        if des_header.UserHeader.DESSHSV.strip() != nitf_version:
            logger.error(
                'SICD: DES.DESSHSV has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
                    des_header.UserHeader.DESSHSV.strip(), nitf_version, nitf_urn))
            header_good = False
        nitf_date = nitf_urn_details['date']
        # date mismatch is only a warning - it does not flip header_good
        if des_header.UserHeader.DESSHSD.strip() != nitf_date:
            logger.warning(
                'SICD: DES.DESSHSD has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
                    des_header.UserHeader.DESSHSD.strip(), nitf_date, nitf_urn))
        return header_good

    def compare_sicd_class():
        # type: () -> bool
        # without a populated classification in the SICD structure, no comparison is possible
        if the_sicd.CollectionInfo is None or the_sicd.CollectionInfo.Classification is None:
            logger.error(
                'SICD: SICD.CollectionInfo.Classification is not populated,\n\t'
                'so can not be compared with SICD DES.DESCLAS `{}`'.format(des_header.Security.CLAS.strip()))
            return False
        sicd_class = the_sicd.CollectionInfo.Classification
        extracted_class = extract_clas(the_sicd)
        # mismatches below are only warnings; this check returns True as long as
        # the SICD classification is populated at all
        if extracted_class != des_header.Security.CLAS.strip():
            logger.warning(
                'SICD: DES.DESCLAS is `{}`,\n\tand SICD.CollectionInfo.Classification '
                'is {}'.format(des_header.Security.CLAS.strip(), sicd_class))
        if des_header.Security.CLAS.strip() != nitf_details.nitf_header.Security.CLAS.strip():
            logger.warning(
                'SICD: DES.DESCLAS is `{}`,\n\tand NITF.CLAS is `{}`'.format(
                    des_header.Security.CLAS.strip(), nitf_details.nitf_header.Security.CLAS.strip()))
        return True

    # check sicd xml structure for validity
    valid_sicd, xml_urn, the_sicd = _evaluate_xml_string_validity(xml_string)
    # check that the sicd information and header information appropriately match
    valid_header = check_des_header_fields()
    # check that the classification seems to make sense
    valid_class = compare_sicd_class()
    return valid_sicd & valid_header & valid_class, the_sicd
def check_sicd_file(nitf_details):
    """
    Check the validity of the given NITF file as a SICD file.

    Parameters
    ----------
    nitf_details : str|NITFDetails
        The path to the NITF file, or a `NITFDetails` object.

    Returns
    -------
    bool
        Overall validity of the file as a SICD.

    Raises
    ------
    ValueError
        If the path is invalid, the file is actually a SIDD, no (or multiple)
        SICD data extensions are found, or an unexpected NITF version or
        pixel type is encountered.
    TypeError
        If the input is neither a string path nor a NITFDetails instance.
    """

    def check_data_extension_headers():
        # type: () -> (str, Union[DataExtensionHeader, DataExtensionHeader0])
        # locate the single SICD xml data extension, rejecting SIDD files outright
        sicd_des = []
        for i in range(nitf_details.des_subheader_offsets.size):
            subhead_bytes = nitf_details.get_des_subheader_bytes(i)
            des_bytes = None
            if subhead_bytes.startswith(b'DEXML_DATA_CONTENT'):
                des_bytes = nitf_details.get_des_bytes(i)
            elif subhead_bytes.startswith(b'DESIDD_XML'):
                raise ValueError(
                    'This file contains an old format SIDD DES, and should be a SIDD file')
            elif subhead_bytes.startswith(b'DESICD_XML'):
                # old (deprecated) style SICD DES
                des_bytes = nitf_details.get_des_bytes(i)
            if des_bytes is None:
                continue
            # compare the SICD structure and the des header structure
            if nitf_details.nitf_version == '02.00':
                des_header = DataExtensionHeader0.from_bytes(subhead_bytes, start=0)
            elif nitf_details.nitf_version == '02.10':
                des_header = DataExtensionHeader.from_bytes(subhead_bytes, start=0)
            else:
                raise ValueError('Got unhandled NITF version {}'.format(nitf_details.nitf_version))
            try:
                des_bytes = des_bytes.decode('utf-8').strip().encode()
                root_node, xml_ns = parse_xml_from_string(des_bytes)
                # namespace makes this ugly
                if 'SIDD' in root_node.tag:
                    raise ValueError(
                        'This file contains a SIDD DES, and should be a SIDD file')
                elif 'SICD' in root_node.tag:
                    sicd_des.append((i, des_bytes, des_header))
            except Exception:
                logger.error('Failed parsing the xml DES entry {} as xml'.format(i))
                # bare `raise` preserves the original traceback (was `raise e`)
                raise
        if len(sicd_des) == 0:
            raise ValueError('No SICD DES values found, so this is not a viable SICD file')
        elif len(sicd_des) > 1:
            raise ValueError(
                'Multiple SICD DES values found at indices {},\n'
                'so this is not a viable SICD file'.format([entry[0] for entry in sicd_des]))
        return sicd_des[0][1], sicd_des[0][2]

    def check_image_data():
        # type: () -> bool
        # map pixel type to the expected NITF image header fields
        pixel_type = the_sicd.ImageData.PixelType
        if pixel_type == 'RE32F_IM32F':
            exp_nbpp = 32
            exp_pvtype = 'R'
        elif pixel_type == 'RE16I_IM16I':
            exp_nbpp = 16
            exp_pvtype = 'SI'
        elif pixel_type == 'AMP8I_PHS8I':
            exp_nbpp = 8
            exp_pvtype = 'INT'
        else:
            raise ValueError('Got unexpected pixel type {}'.format(pixel_type))

        valid_images = True
        # verify that all images have the correct pixel type
        for i, img_header in enumerate(nitf_details.img_headers):
            if img_header.ICAT.strip() != 'SAR':
                valid_images = False
                logger.error(
                    'SICD: image segment at index {} of {} has ICAT = `{}`,\n\texpected to be `SAR`'.format(
                        i, len(nitf_details.img_headers), img_header.ICAT.strip()))
            if img_header.PVTYPE.strip() != exp_pvtype:
                valid_images = False
                logger.error(
                    'SICD: image segment at index {} of {} has PVTYPE = `{}`,\n\t'
                    'expected to be `{}` based on pixel type {}'.format(
                        i, len(nitf_details.img_headers), img_header.PVTYPE.strip(), exp_pvtype, pixel_type))
            if img_header.NBPP != exp_nbpp:
                valid_images = False
                logger.error(
                    'SICD: image segment at index {} of {} has NBPP = `{}`,\n\t'
                    'expected to be `{}` based on pixel type {}'.format(
                        i, len(nitf_details.img_headers), img_header.NBPP, exp_nbpp, pixel_type))
            if len(img_header.Bands) != 2:
                valid_images = False
                logger.error('SICD: image segment at index {} of {} does not have two (I/Q or M/P) bands'.format(
                    i, len(nitf_details.img_headers)))
                continue
            # BUG FIX: the original used `and` in the two conditions below, which
            # only flagged an error when BOTH band subcategories were wrong; `or`
            # flags any mismatch of the expected (M, P) / (I, Q) pair.
            if pixel_type == 'AMP8I_PHS8I':
                if img_header.Bands[0].ISUBCAT.strip() != 'M' or img_header.Bands[1].ISUBCAT.strip() != 'P':
                    valid_images = False
                    logger.error(
                        'SICD: pixel_type is {}, image segment at index {} of {}\n\t'
                        'has bands with ISUBCAT {}, expected ("M", "P")'.format(
                            pixel_type, i, len(nitf_details.img_headers),
                            (img_header.Bands[0].ISUBCAT.strip(), img_header.Bands[1].ISUBCAT.strip())))
            else:
                if img_header.Bands[0].ISUBCAT.strip() != 'I' or img_header.Bands[1].ISUBCAT.strip() != 'Q':
                    valid_images = False
                    logger.error(
                        'SICD: pixel_type is {}, image segment at index {} of {}\n\t'
                        'has bands with ISUBCAT {}, expected ("I", "Q")'.format(
                            pixel_type, i, len(nitf_details.img_headers),
                            (img_header.Bands[0].ISUBCAT.strip(), img_header.Bands[1].ISUBCAT.strip())))
        return valid_images

    if isinstance(nitf_details, str):
        if not os.path.isfile(nitf_details):
            raise ValueError('Got string input, but it is not a valid path')
        nitf_details = NITFDetails(nitf_details)
    if not isinstance(nitf_details, NITFDetails):
        raise TypeError(
            'Input is expected to be a path to a NITF file, or a NITFDetails object instance')

    # find the sicd header
    sicd_xml_string, des_header = check_data_extension_headers()
    # check that the sicd and header are valid
    valid_sicd_des, the_sicd = check_sicd_data_extension(nitf_details, des_header, sicd_xml_string)
    # check that the image segments all make sense compared to the sicd structure
    valid_img = check_image_data()

    all_valid = valid_sicd_des and valid_img
    if valid_img:
        try:
            # verify that a reader can actually be constructed from the segments
            SICDReader(nitf_details.file_name)
        except Exception:
            logger.exception(
                'SICD: All image segments appear viable for the SICD,\n\t'
                'but SICDReader construction failed')
            # BUG FIX: the failure was previously logged but did not affect
            # the returned validity
            all_valid = False
    return all_valid
def check_file(file_name):
    """
    Check the SICD validity for the given file SICD (i.e. appropriately styled NITF)
    or xml file containing the SICD structure alone.

    Parameters
    ----------
    file_name : str|SICDDetails

    Returns
    -------
    bool
        The validity result.

    Raises
    ------
    ValueError
        If a string input is not a path to an existing file.
    """
    if isinstance(file_name, str):
        if not os.path.isfile(file_name):
            raise ValueError('Got string input, but it is not a valid path')

        # check if this is just an xml file
        with open(file_name, 'rb') as fi:
            initial_bits = fi.read(30)
            if initial_bits.startswith(b'<?xml') or initial_bits.startswith(b'<SICD'):
                # BUG FIX: the sniffed 30 bytes must be re-attached - the original
                # passed only `fi.read()`, dropping the `<?xml`/`<SICD` prefix and
                # leaving the string unparseable as xml
                sicd_xml = initial_bits + fi.read()
                return _evaluate_xml_string_validity(sicd_xml)[0]

    # otherwise treat the input as a NITF-packaged SICD
    return check_sicd_file(file_name)
if __name__ == '__main__':
    # command line entry point: validate a single SICD (NITF or bare xml) file
    parser = argparse.ArgumentParser('SICD Consistency')
    parser.add_argument('file_name')
    parser.add_argument(
        '-l', '--level', default='WARNING',
        choices=['INFO', 'WARNING', 'ERROR'], help="Logging level")
    config = parser.parse_args()

    logging.basicConfig(level=config.level)
    logger.setLevel(config.level)
    validity = check_file(config.file_name)
    if validity:
        logger.info('\nSICD: {} has been validated with no errors'.format(config.file_name))
    else:
        logger.error('\nSICD: {} has apparent errors'.format(config.file_name))
    # NOTE(review): int(validity) exits with 1 on success and 0 on failure, which
    # inverts the usual shell convention (0 == success) - confirm downstream
    # tooling expects this before changing it
    sys.exit(int(validity))
| 15,087 | 37.197468 | 115 | py |
sarpy | sarpy-master/sarpy/consistency/consistency.py | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
__classification__ = "UNCLASSIFIED"
__author__ = "Nathan Bombaci, Valkyrie"
import collections
import contextlib
import linecache
import re
import sys
import textwrap
from typing import List, Dict
import numpy as np
def _exception_stack():
"""
Helper function to parse call stack of an exception
Returns
-------
List[Dict]
{'filename': str, 'lineno': int, 'line': str} for each traceback in the current exception
"""
try:
exctype, value, tb = sys.exc_info()
stack = []
tback = tb
while tback is not None:
frame = tback.tb_frame
filename = frame.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, tback.tb_lineno, frame.f_globals)
stack.append({'filename': filename,
'lineno': tback.tb_lineno,
'line': line.strip()})
tback = tback.tb_next
finally:
exctype = value = tb = None
return stack
class ConsistencyChecker(object):
    """
    Base class for implementing consistency checkers.

    This class can be used to perform and log comparisons. Each comparison
    can be logged as either an ``'Error'`` or a ``'Warning'``.
    """

    def __init__(self):
        # ordered mapping of check-function name -> result record
        self._all_check_results = collections.OrderedDict()
        # result record of the check currently executing (None outside _run_check)
        self._active_check = None
        # discover the checks: every callable attribute named 'check_*', sorted by name
        names = [name for name in dir(self) if name.startswith('check_')]
        attrs = [getattr(self, name) for name in sorted(names)]
        self.funcs = [attr for attr in attrs if hasattr(attr, '__call__')]

    def check(self, func_name=None, *, allow_prefix=False, ignore_patterns=None):
        """
        Run checks.

        Parameters
        ----------
        func_name: None|str|List[str]
            List of check functions to run.  If omitted, then all check functions
            will be run.
        allow_prefix: bool
            If ``True``, runs tests with names starting with any `func_name`
            If ``False``, runs tests with names equal to any `func_name`
        ignore_patterns: list-like of str
            Skips tests if zero or more characters at the beginning of their name match the regular expression patterns
        """
        # run specified test(s) or all of them
        if func_name is None:
            funcs = self.funcs
        else:
            if isinstance(func_name, str):
                func_name = [func_name]

            def matches_prefix(requested, actual):
                return actual.startswith(requested)

            def matches_exact(requested, actual):
                return requested == actual

            qualifier = matches_prefix if allow_prefix else matches_exact

            funcs = []
            not_found = []
            for requested_func in set(func_name):
                matches = [func for func in self.funcs if qualifier(requested_func, func.__name__)]
                funcs.extend(matches)
                if not matches:
                    not_found.append(requested_func)
            if not_found:
                raise ValueError(f"Functions not found: {not_found}")

        # ignore_patterns filters apply regardless of how funcs was selected
        for pattern in (ignore_patterns or []):
            funcs = [func for func in funcs if not re.match(pattern, func.__name__)]

        for func in funcs:
            self._run_check(func)

    def _run_check(self, func):
        """
        Runs a single 'check_' method and store the results.

        Parameters
        ----------
        func: Callable
            Run the supplied function
        """
        self._active_check = {
            'doc': func.__doc__,
            'details': [],
            'passed': True}
        # func() will populate self._active_check
        try:
            func()
        except Exception as e:
            # an unexpected exception is recorded as an 'Error' with an abbreviated traceback
            stack = _exception_stack()
            message = []
            for indent, frame in enumerate(stack[1:]):
                message.append(' '*indent*4 + "line#{lineno}: {line}".format(lineno=frame['lineno'],
                                                                             line=frame['line']))
            message.append(str(e))
            self._add_item_to_current('Error', False, '\n'.join(message), details="Exception Raised")
        self._all_check_results[func.__name__] = self._active_check
        self._active_check = None

    def _add_item_to_current(self, severity, passed, message, details=''):
        """
        Records the result of a test.

        Parameters
        ----------
        severity : str
            Severity level of the results e.g. 'Error', 'Warning'
        passed : bool
            The result of the test
        message : str
            Text message describing the test
        details : str
            Additional message details
        """
        item = {'severity': severity,
                'passed': passed,
                'message': message,
                'details': str(details)}
        self._active_check['details'].append(item)
        # a single failed sub-check fails the whole active check
        self._active_check['passed'] &= passed

    def _format_assertion(self, e, depth=1):
        """
        Format an assertion to human-readable text.

        Parameters
        ----------
        e : Exception
            The exception to be formatted
        depth : int
            Which level of the exception stack to format

        Returns
        -------
        formatted : str
            Formatted stack level containing line number and line text
        """
        stack = _exception_stack()
        frame = stack[depth]
        return ("line#{lineno}: {line}".format(lineno=frame['lineno'], line=frame['line'])
                + '\n' + '\n'.join(str(x) for x in e.args))

    @contextlib.contextmanager
    def need(self, details=None):
        """Context manager for scoping 'Error' level checks

        Parameters
        ----------
        details : None|str
            Text describing the scope of checks
        """
        with self._crave('Error', details=details):
            yield

    @contextlib.contextmanager
    def want(self, details=None):
        """Context manager for scoping 'Warning' level checks

        Parameters
        ----------
        details : None|str
            Text describing the scope of checks
        """
        with self._crave('Warning', details=details):
            yield

    @contextlib.contextmanager
    def _crave(self, level, details, depth=2):
        """
        Context manager for scoping checks

        Parameters
        ----------
        level : str
            Severity level of the checks.  e.g. 'Error' or 'Warning'
        details : str|None
            Text describing the scope of checks
        depth : int
            Depth in the exception stack to look for check information
        """
        try:
            yield
            # no AssertionError raised inside the scope - record a pass
            if self._active_check is not None:
                self._add_item_to_current(level, True, '', details=details)
        except AssertionError as e:
            # outside a _run_check invocation there is nowhere to record - re-raise
            if self._active_check is None:
                raise
            if not details:
                # fall back to the asserting source line as the description
                stack = _exception_stack()
                details = stack[depth]['line']
            self._add_item_to_current(level, False, self._format_assertion(e, depth=depth), details=details)

    @contextlib.contextmanager
    def precondition(self, details=None):
        """
        Context manager for scoping conditional ('No-Op' level) checks

        Parameters
        ----------
        details : None|str
            Text describing the scope of checks
        """
        try:
            yield
        except AssertionError as e:
            # a failed precondition silently skips the scope (recorded as 'No-Op')
            if self._active_check is None:
                return
            if not details:
                stack = _exception_stack()
                details = stack[1]['line']
            self._add_item_to_current('No-Op', True, self._format_assertion(e), details=details)

    def all(self):
        """
        Returns all results.

        Returns
        -------
        Dict
            Unfiltered dictionary of all (Passed, Failed, Skipped) results
        """
        return self._all_check_results

    def failures(self, omit_passed_sub=False):
        """
        Returns failure results.

        Parameters
        ----------
        omit_passed_sub : bool
            If True, passed sub-checks will be omitted.

        Returns
        -------
        Dict
            Dictionary containing only results of failed checks
        """
        retval = collections.OrderedDict()
        for k, v in self._all_check_results.items():
            if not v['passed']:
                retval[k] = dict(v)
                if omit_passed_sub:
                    retval[k]['details'] = [d for d in v['details'] if not d['passed']]
        return retval

    def passes(self):
        """
        Returns passed checks that are not wholly No-Op.

        Returns
        -------
        Dict
            Dictionary containing checks that are not wholly No-Op
        """
        return {k: v for k, v in self.all().items()
                if v['passed'] and any(d['severity'] != 'No-Op' for d in v['details'])}

    def skips(self):
        """
        Returns passed checks that are wholly No-Op.

        Returns
        -------
        Dict
            Dictionary containing checks that are wholly No-Op
        """
        return {k: v for k, v in self.all().items()
                if v['passed'] and all(d['severity'] == 'No-Op' for d in v['details'])}

    def print_result(self, include_passed_asserts=True, color=True, include_passed_checks=False, width=120,
                     skip_detail=False, fail_detail=False, pass_detail=False):
        """
        Print results to stdout.

        Parameters
        ----------
        include_passed_asserts : bool
            Print asserts which passed
        color : bool
            Colorize the output
        include_passed_checks : bool
            Print checks which passed
        width : int
            Output up to `width` columns
        skip_detail : bool
            Include details of skips
        fail_detail: bool
            Include details of failures
        pass_detail: bool
            Include details of passes
        """
        # filter down to the results requested for display
        to_print = collections.OrderedDict()
        for k, v in self._all_check_results.items():
            if include_passed_checks or not v['passed']:
                to_print[k] = dict(v)
                to_print[k]['details'] = [d for d in v['details'] if include_passed_asserts or not d['passed']]

        # (severity, passed) -> [label, *ANSI color names] for the line prefix
        if color:
            coloration = {('Error', True): ['[Pass]', 'green', 'bold'],
                          ('Error', False): ['[Error]', 'red', 'bold'],
                          ('Warning', True): ['[Pass]', 'cyan'],
                          ('Warning', False): ['[Warning]', 'yellow'],
                          ('No-Op', True): ['[Skip]', 'blue']}
        else:
            coloration = {('Error', True): ['[Need]'],
                          ('Error', False): ['[Error]'],
                          ('Warning', True): ['[Want]'],
                          ('Warning', False): ['[Warning]'],
                          ('No-Op', True): ['[Skip]']}

        indent = 4
        for case, details in to_print.items():
            print(f"{case}: {str(details['doc']).strip()}")
            if details['details']:
                for sub in details['details']:
                    lead = in_color(*coloration[sub['severity'], sub['passed']])
                    need_want = {'Error': 'Need', 'Warning': 'Want', 'No-Op': 'Unless'}[sub['severity']]
                    print("{indent}{lead} {need_want}: {details}".format(indent=' '*indent,
                                                                         lead=lead,
                                                                         need_want=need_want,
                                                                         details=sub['details']))
                    if (skip_detail and sub['severity'] == 'No-Op'
                            or (fail_detail and not sub['passed'])
                            or (pass_detail and sub['passed'])):
                        # wrap each message line to the requested width
                        for line in sub['message'].splitlines():
                            message = '\n'.join(textwrap.wrap(line, width=width,
                                                              subsequent_indent=' '*(indent + 8),
                                                              initial_indent=' '*(indent+4)))
                            print(message)
            else:
                print("{}---: No test performed".format(' '*indent))
class Approx:
    """
    Wrapper for performing approximate value comparisons.

    Parameters
    ----------
    value : float
        The Value to be compared.
    atol : float
        Absolute tolerance
    rtol : float
        Relative tolerance

    See Also
    --------
    pytest.approx
    """
    # Tell numpy to use our comparison operators
    __array_ufunc__ = None
    __array_priority__ = 100

    def __init__(self, value, atol=1e-10, rtol=1e-6):
        self.value = value
        self.atol = atol
        self.rtol = rtol

    def _isclose(self, rhs):
        # elementwise closeness of rhs to the wrapped value
        return np.isclose(rhs, self.value, rtol=self.rtol, atol=self.atol)

    def __eq__(self, rhs):
        return np.all(self._isclose(rhs))

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __le__(self, rhs):
        # "less or approximately equal", elementwise
        below = np.less(self.value, rhs)
        return np.all(np.logical_or(below, self._isclose(rhs)))

    def __lt__(self, rhs):
        # strict comparison intentionally collapses to the tolerant one
        return self.__le__(rhs)

    def __ge__(self, rhs):
        # "greater or approximately equal", elementwise
        above = np.greater(self.value, rhs)
        return np.all(np.logical_or(above, self._isclose(rhs)))

    def __gt__(self, rhs):
        return self.__ge__(rhs)

    def __repr__(self):
        tol = self.atol + np.abs(np.asarray(self.value)) * self.rtol
        return f"{self.value} ± {tol}"
def in_color(string, *color):
    """
    Wrap a string with ANSI color control characters.

    Parameters
    ----------
    string : str
        The string to colorize.
    *color : str
        color identifiers to use. See `start_color`.

    Returns
    -------
    str
        ANSI colorized version of `string`
    """
    if not color:
        # nothing requested - hand the string back untouched
        return string
    # stack all requested color/attribute codes, then reset at the end
    prefix = ''.join(start_color(name) for name in color)
    return "{}{}{}".format(prefix, string, END_COLOR)
# ANSI escape sequence that resets all styling
END_COLOR = "\x1b[0m"


def start_color(color):
    """
    Get an ANSI color control character.

    Parameters
    ----------
    color : {'black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white', 'bold', 'light', 'invert'}
        Desired color

    Returns
    -------
    str
        ANSI color control for desired color
    """
    # SGR codes: 30-37 select foreground colors; 1/2/7 are text attributes
    color_table = {
        'black': 30,
        'red': 31,
        'green': 32,
        'yellow': 33,
        'blue': 34,
        'purple': 35,
        'cyan': 36,
        'white': 37,
        'bold': 1,
        'light': 2,
        'invert': 7,
    }
    # unknown names raise KeyError, matching the original lookup behavior
    return "\x1b[{}m".format(color_table[color])
| 15,118 | 29.298597 | 119 | py |
sarpy | sarpy-master/sarpy/consistency/cphd_consistency.py | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
__classification__ = "UNCLASSIFIED"
__author__ = "Nathan Bombaci, Valkyrie"
import logging
import argparse
import collections
import copy
import functools
import itertools
import numbers
import os
import re
from typing import List
import numpy as np
import numpy.polynomial.polynomial as npp
import scipy.constants
from sarpy.geometry import geocoords
import sarpy.consistency.consistency as con
import sarpy.consistency.parsers as parsers
import sarpy.io.phase_history.cphd1_elements.CPHD
import sarpy.io.phase_history.cphd1_elements.utils as cphd1_utils
from sarpy.io.phase_history import cphd_schema
logger = logging.getLogger(__name__)
try:
import pytest
except ImportError:
pytest = None
logger.critical(
'Functionality for CPHD consistency testing cannot proceed WITHOUT the pytest '
'package')
try:
from lxml import etree
except ImportError:
etree = None
pytest = None
logger.critical(
'Functionality for CPHD consistency testing cannot proceed WITHOUT the lxml '
'package')
try:
import shapely.geometry as shg
have_shapely = True
except ImportError:
have_shapely = False
try:
import networkx as nx
have_networkx = True
except ImportError:
have_networkx = False
INVALID_CHAR_REGEX = re.compile(r'\W')
def strip_namespace(root):
    """
    Returns a copy of the input etree with namespaces removed.

    Parameters
    ----------
    root : etree.ElementTree
        The element tree

    Returns
    -------
    etree.ElementTree
        The element tree
    """
    root_copy = copy.deepcopy(root)
    # strip namespace from each element
    for elem in root_copy.iter():
        try:
            # lxml qualified tags look like '{uri}local'; keep only the local part
            elem.tag = elem.tag.split('}')[-1]
        except (AttributeError, TypeError):
            # comments/processing instructions have non-string tags - leave them alone
            pass
    # remove default namespace
    nsmap = root_copy.nsmap
    nsmap.pop(None, None)
    # BUG FIX: the second positional argument of etree.Element is `attrib`;
    # passing the namespace map positionally would install the prefix->uri
    # pairs as XML *attributes* on the new root. It must be passed as `nsmap=`.
    new_root = etree.Element(root_copy.tag, nsmap=nsmap)
    # NOTE(review): attributes of the original root element are not copied
    # onto new_root - confirm that is intended
    new_root[:] = root_copy[:]
    return new_root
def parse_pvp_elem(elem):
    """
    Reverse of `pvp_elem`.

    Parameters
    ----------
    elem : etree.ElementTree.Element
        Node for the specified PVP parameter.

    Returns
    -------
    Tuple
        Tuple (parameter_name, {``'offset'``:offset, ``'size'``:size, ``'dtype'``:dtype}). PVP element information.
    """
    # added (user-defined) parameters carry their name in a child node;
    # standard parameters are named by the tag itself
    name = elem.find('Name').text if elem.tag == "AddedPVP" else elem.tag
    info = {
        "offset": int(elem.find('Offset').text),
        "size": int(elem.find('Size').text),
        "dtype": cphd1_utils.binary_format_string_to_dtype(elem.find('Format').text),
    }
    return name, info
def read_header(file_handle):
    """Reads a CPHD header from a file.

    Parameters
    ----------
    file_handle
        Readable File object, i.e., ``file_handle = open(filename, 'rb')``.
        Handle of the CPHD file that is to be read

    Returns
    -------
    Dict
        Dictionary containing CPHD header values.
    """
    # rewind and confirm the file type line before parsing the KVP header
    file_handle.seek(0, 0)
    version = file_handle.readline().decode()
    assert version.startswith(('CPHD/1.0', 'CPHD/1.1'))
    header = sarpy.io.phase_history.cphd1_elements.CPHD.CPHDHeader.from_file_object(file_handle)
    # keep only the populated header fields
    return {field: getattr(header, field)
            for field in header._fields if getattr(header, field) is not None}
def per_channel(method):
    """
    Decorator to mark check methods as being applicable to each CPHD channel

    Parameters
    ----------
    method : Callable
        Method to mark

    Returns
    -------
    Callable
        Marked input `method`
    """
    # tag the callable in place; CphdConsistency.__init__ looks for this flag
    setattr(method, 'per_channel', True)
    return method
def get_by_id(xml, path, identifier):
    """
    Matches the first element that has a child named Identifier whose text is `identifier`.

    Parameters
    ----------
    xml : etree.Element
        Root node of XPath expression
    path : str
        XPath expression relative to `xml`
    identifier : str
        Value of child Identifier node

    Returns
    -------
    None|etree.Element
        node found by path with an Identifier node with value of `identifier` or None if a match is not found
    """
    # filter with an XPath child-value predicate rather than looping in python
    predicate = '[Identifier="{}"]'.format(identifier)
    return xml.find(path + predicate)
class CphdConsistency(con.ConsistencyChecker):
    """
    Check CPHD file structure and metadata for internal consistency

    Parameters
    ----------
    cphdroot : etree.Element
        root CPHD XML node
    pvps : None|Dict[str, np.ndarray]
        numpy structured array of PVPs
    header : Dict
        CPHD header key value pairs
    filename : None|str
        Path to CPHD file (or None if not available)
    schema : str
        Path to CPHD XML Schema. If None, tries to find a version-specific schema
    check_signal_data: bool
        Should the signal array be checked for invalid values
    """

    def __init__(self, cphdroot, pvps, header, filename, schema=None, check_signal_data=False):
        super(CphdConsistency, self).__init__()
        self.xml_with_ns = etree.fromstring(etree.tostring(cphdroot))  # handle element or tree -> element
        self.xml = strip_namespace(self.xml_with_ns)
        self.pvps = pvps
        self.filename = filename
        self.header = header
        self.version = self._version_lookup()
        if schema is None and self.version is not None:
            # invert the urn mapping to find the urn for this release, then its schema
            urn = {v['release']: k for k, v in cphd_schema.urn_mapping.items()}[self.version]
            self.schema = cphd_schema.get_schema_path(urn)
        else:
            self.schema = schema
        self.check_signal_data = check_signal_data
        channel_ids = [x.text for x in self.xml.findall('./Data/Channel/Identifier')]
        # process decorated methods to generate per-channel tests
        # reverse the enumerated list so that we don't disturb indices on later iterations as we insert into the list
        for index, func in reversed(list(enumerate(self.funcs))):
            if getattr(func, 'per_channel', False):
                subfuncs = []
                for channel_id in channel_ids:
                    # find the Channel/Parameters node whose Identifier matches this channel
                    channel_node = self.xml.xpath('./Channel/Parameters/Identifier[text()="{}"]/..'.format(
                        channel_id))[0]
                    subfunc = functools.partial(func, channel_id, channel_node)
                    this_doc = func.__doc__.strip()
                    if this_doc.endswith('.'):
                        this_doc = this_doc[:-1]
                    subfunc.__doc__ = f"{this_doc} for channel {channel_id}."
                    # sanitize the channel id so the generated name stays a valid identifier
                    modified_channel_id = re.sub(INVALID_CHAR_REGEX, '_', channel_id)
                    subfunc.__name__ = "{name}_{chanid}".format(name=func.__name__, chanid=modified_channel_id)
                    subfuncs.append(subfunc)
                # replace the single per-channel template with its expanded variants
                self.funcs[index:index+1] = subfuncs
    @classmethod
    def from_file(cls, filename, schema=None, check_signal_data=False):
        """
        Create a CphdConsistency object from a CPHD file.

        Parameters
        ----------
        filename : str
            Path to CPHD file
        schema : str
            Path to CPHD XML Schema. If None, tries to find a version-specific schema
        check_signal_data : bool
            Should the signal array be checked for invalid values

        Returns
        -------
        CphdConsistency
            new object
        """
        with open(filename, 'rb') as infile:
            try:
                # a bare XML instance parses directly; there is then no header/PVP block
                header = None
                cphdroot = etree.parse(infile)
                pvp_block = None
            except etree.XMLSyntaxError:
                # not plain xml - treat as a full CPHD file with header/XML/PVP blocks
                header = read_header(infile)
                infile.seek(header['XML_BLOCK_BYTE_OFFSET'], 0)
                xml_block = infile.read(header['XML_BLOCK_SIZE'])
                cphdroot = etree.fromstring(xml_block)
                infile.seek(header['PVP_BLOCK_BYTE_OFFSET'], 0)
                pvp_block = infile.read(header['PVP_BLOCK_SIZE'])
        cphdroot_no_ns = strip_namespace(etree.fromstring(etree.tostring(cphdroot)))
        fields = [parse_pvp_elem(field) for field in list(cphdroot_no_ns.find('./PVP'))]
        # XML PVP offsets appear to be recorded in 8-byte words (hence the *8 to
        # get byte offsets) - TODO confirm against the CPHD specification
        dtype = np.dtype({'names': [name for name, _ in fields],
                          'formats': [info['dtype'] for _, info in fields],
                          'offsets': [info['offset']*8 for _, info in fields]}).newbyteorder('B')
        if pvp_block is None:
            pvps = None
        else:
            # slice the PVP block into one structured array per channel
            pvps = {}
            for channel_node in cphdroot_no_ns.findall('./Data/Channel'):
                channel_id = channel_node.findtext('./Identifier')
                channel_pvps = np.frombuffer(pvp_block, dtype=dtype,
                                             count=int(channel_node.findtext('./NumVectors')),
                                             offset=int(channel_node.findtext('./PVPArrayByteOffset')))
                pvps[channel_id] = channel_pvps
        return cls(cphdroot, pvps, header, filename, schema=schema, check_signal_data=check_signal_data)
    def _version_lookup(self):
        """
        Returns the version string associated with the XML instance or None if a match is not found.
        """
        this_ns = etree.QName(self.xml_with_ns).namespace
        if this_ns is None:
            # un-namespaced XML cannot be matched to a schema release
            return None
        for schema_info in cphd_schema.urn_mapping.values():
            schema_path = schema_info.get('schema')
            # match by comparing our namespace against each schema's targetNamespace
            if schema_path is not None and this_ns == etree.parse(schema_path).getroot().get('targetNamespace'):
                return schema_info['release']
        # implicitly returns None when no schema matches
def _get_channel_pvps(self, channel_id):
"""
Returns the PVPs associated with the channel keyed by `channel_id` or raises an AssertionError.
"""
assert self.pvps is not None
assert channel_id in self.pvps
return self.pvps[channel_id]
def check_file_type_header(self):
    """
    Version in File Type Header matches the version in the XML.
    """
    with self.precondition():
        # Requires both a detected XML version and an actual file on disk.
        assert self.version is not None
        assert self.filename is not None
        with open(self.filename, 'rb') as fd:
            # The file type header is the first line of the file, e.g. b'CPHD/1.0.1\n'.
            first_line = fd.readline().decode()
        assert first_line.startswith('CPHD/')
        assert first_line.endswith('\n')
        # Strip the 'CPHD/' prefix and the trailing newline to isolate the version.
        file_type_header_version = first_line[len('CPHD/'):-1]
        with self.need("version in File Type Header matches the version in the XML"):
            assert self.version == file_type_header_version
def check_header_keys(self):
    """
    Asserts that the required keys are in the header.
    """
    with self.precondition():
        assert self.header is not None
        # These KVP fields are mandatory for every CPHD file header.
        required_fields = {'XML_BLOCK_SIZE', 'XML_BLOCK_BYTE_OFFSET',
                           'PVP_BLOCK_SIZE', 'PVP_BLOCK_BYTE_OFFSET',
                           'SIGNAL_BLOCK_SIZE', 'SIGNAL_BLOCK_BYTE_OFFSET',
                           'CLASSIFICATION', 'RELEASE_INFO'}
        for name in required_fields:
            with self.need('Required header field: {} is in header'.format(name)):
                assert name in self.header
    # The SUPPORT_BLOCK fields are optional, but neither may appear without the other.
    for present, partner in (('SUPPORT_BLOCK_SIZE', 'SUPPORT_BLOCK_BYTE_OFFSET'),
                             ('SUPPORT_BLOCK_BYTE_OFFSET', 'SUPPORT_BLOCK_SIZE')):
        with self.precondition():
            assert present in self.header
            with self.need("SUPPORT_BLOCK fields go together"):
                assert partner in self.header
def check_classification_and_release_info(self):
    """
    Asserts that the Classification and ReleaseInfo fields are the same in header and the xml.
    """
    with self.precondition():
        assert self.header is not None
        with self.need("Header CLASSIFICATION matches XML Classification"):
            # Chained comparison: header value equals the XML text AND the XML
            # element is present (findtext returns None when it is missing).
            assert self.header['CLASSIFICATION'] == self.xml.findtext('./CollectionID/Classification') is not None
        with self.need("Header RELEASE_INFO matches XML ReleaseInfo"):
            assert self.header['RELEASE_INFO'] == self.xml.findtext('./CollectionID/ReleaseInfo') is not None
def check_against_schema(self):
    """
    The XML matches the schema.
    """
    with self.need(f"Schema available for checking xml whose root tag = {self.xml_with_ns.tag}"):
        assert self.schema is not None
        schema = etree.XMLSchema(file=str(self.schema))
        with self.need("XML passes schema"):
            # Include the libxml2 error log in the assertion message on failure.
            assert schema.validate(self.xml_with_ns), schema.error_log
@per_channel
def check_channel_dwell_exist(self, channel_id, channel_node):
    """
    The referenced Dwell and COD nodes exist.
    """
    # The channel's DwellTimes references must resolve to actual /Dwell entries.
    cod_id = channel_node.findtext('./DwellTimes/CODId')
    with self.need(f"/Dwell/CODTime with Identifier={cod_id} exists for DwellTime in channel={channel_id}"):
        assert get_by_id(self.xml, './Dwell/CODTime', cod_id) is not None
    dwell_id = channel_node.findtext('./DwellTimes/DwellId')
    with self.need(f"/Dwell/DwellTime with Identifier={dwell_id} exists for DwellTime in channel={channel_id}"):
        assert get_by_id(self.xml, './Dwell/DwellTime', dwell_id) is not None
@per_channel
def check_channel_dwell_polys(self, channel_id, channel_node):
    """
    /Dwell/CODTime/CODTimePoly and /Dwell/DwellTime/DwellTimePoly are consistent with other metadata.
    """
    # Resolve the channel's referenced COD and Dwell time 2-D polynomials.
    cod_node = get_by_id(self.xml, './Dwell/CODTime', channel_node.findtext('./DwellTimes/CODId'))
    dwell_node = get_by_id(self.xml, './Dwell/DwellTime', channel_node.findtext('./DwellTimes/DwellId'))
    codtime_poly = parsers.parse_poly2d(cod_node.find('./CODTimePoly'))
    dwelltime_poly = parsers.parse_poly2d(dwell_node.find('./DwellTimePoly'))

    def _get_image_area_polygon(image_area_elem):
        # Prefer the explicit polygon; otherwise fall back to the X1Y1/X2Y2 box.
        inner_polygon = image_area_elem.find('./Polygon')
        if inner_polygon is not None:
            return shg.Polygon(self.get_polygon(inner_polygon))
        x1, y1 = parsers.parse_xy(image_area_elem.find('./X1Y1'))
        x2, y2 = parsers.parse_xy(image_area_elem.find('./X2Y2'))
        return shg.box(x1, y1, x2, y2)

    # A channel-specific image area overrides the scene-level one when present.
    image_area_elem = channel_node.find('./ImageArea')
    if image_area_elem is None:
        image_area_elem = self.xml.find('./SceneCoordinates/ImageArea')
    image_area_polygon = _get_image_area_polygon(image_area_elem)

    def _get_points_in_polygon(polygon, grid_size=25):
        # Sample a regular grid over the polygon's bounding box plus the exterior
        # vertices, keeping only the points that fall inside the polygon.
        bounds = np.asarray(polygon.bounds).reshape(2, 2)  # [[xmin, ymin], [xmax, ymax]]
        mesh = np.stack(np.meshgrid(np.linspace(bounds[0, 0], bounds[1, 0], grid_size),
                                    np.linspace(bounds[0, 1], bounds[1, 1], grid_size)), axis=-1)
        coords = shg.MultiPoint(np.concatenate([mesh.reshape(-1, 2),
                                                np.asarray(polygon.exterior.coords)[:-1, :]], axis=0))
        return np.asarray([pt.coords for pt in polygon.intersection(coords).geoms])

    sampled_iacs = _get_points_in_polygon(image_area_polygon).T
    # Evaluate the polynomials over the sampled image area coordinates.
    sampled_cods = npp.polyval2d(*sampled_iacs, codtime_poly)
    sampled_dwells = npp.polyval2d(*sampled_iacs, dwelltime_poly)
    with self.need("/Dwell/DwellTime/DwellTimePoly is nonnegative in image area"):
        assert sampled_dwells.min() >= 0.0
    # Dwell interval edges: center-of-dwell time +/- half the dwell time.
    sampled_tref1 = sampled_cods - 0.5 * sampled_dwells
    sampled_tref2 = sampled_cods + 0.5 * sampled_dwells
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        mask = np.isfinite(pvp['TxTime'])

        def calc_tref(v):
            # Range-weighted reference time between transmit and receive for vector v.
            r_xmt = np.linalg.norm(v['TxPos'] - v['SRPPos'])
            r_rcv = np.linalg.norm(v['RcvPos'] - v['SRPPos'])
            return v['TxTime'] + r_xmt / (r_xmt + r_rcv) * (v['RcvTime'] - v['TxTime'])

        # Reference times of the first and last vectors with finite TxTime.
        pvps_tref1 = calc_tref(pvp[mask][0])
        pvps_tref2 = calc_tref(pvp[mask][-1])
        with self.need("/Dwell/CODTime/CODTimePoly and /Dwell/DwellTime/DwellTimePoly supported by PVPs"):
            assert sampled_tref1.min() >= con.Approx(pvps_tref1, atol=100e-6)
            assert sampled_tref2.max() <= con.Approx(pvps_tref2, atol=100e-6)
def check_antenna(self):
    """
    Check that antenna node is consistent: declared counts match the actual
    number of child nodes, and phase-center ACF references resolve.
    """
    with self.precondition():
        antenna_node = self.xml.find("./Antenna")
        assert antenna_node is not None
        # Each declared count must equal the number of corresponding child nodes.
        count_checks = (
            ("./NumACFs", "./AntCoordFrame",
             "The NumACFs must be equal to the number of ACF nodes."),
            ("./NumAPCs", "./AntPhaseCenter",
             "The NumAPCs must be equal to the number of APC nodes."),
            ("./NumAntPats", "AntPattern",
             "The NumAntPats must be equal to the number of AntPattern nodes."),
        )
        for count_path, child_path, message in count_checks:
            declared = int(antenna_node.findtext(count_path))
            actual = len(antenna_node.findall(child_path))
            with self.need(message):
                assert declared == actual
        # Every ACFId referenced by a phase center must name a declared coordinate frame.
        referenced_ids = {node.text for node in antenna_node.findall("./AntPhaseCenter/ACFId")}
        declared_ids = {node.text for node in antenna_node.findall("./AntCoordFrame/Identifier")}
        with self.need("./AntPhaseCenter/ACFId references an identifier in AntCoordFrame."):
            assert referenced_ids <= declared_ids
@per_channel
def check_channel_antenna_exist(self, channel_id, channel_node):
    """
    The antenna patterns and phase centers exist if declared.

    For each of the Tx and Rcv sides, the channel's APCId/APATId references
    must resolve to AntPhaseCenter/AntPattern nodes under /Antenna.
    """
    with self.precondition():
        assert channel_node.find('./Antenna') is not None
        for side in 'Tx', 'Rcv':
            apc_id = channel_node.findtext('./Antenna/{}APCId'.format(side))
            with self.need("AntPhaseCenter node exists with name {} (for {})".format(apc_id, side)):
                # BUG FIX: the lookup paths previously carried a trailing slash
                # ('./Antenna/AntPhaseCenter/'), which is not a valid ElementPath
                # step and is inconsistent with every other get_by_id call here.
                assert get_by_id(self.xml, './Antenna/AntPhaseCenter', apc_id) is not None
            apat_id = channel_node.findtext('./Antenna/{}APATId'.format(side))
            with self.need("AntPattern node exists with name {} (for {})".format(apat_id, side)):
                assert get_by_id(self.xml, './Antenna/AntPattern', apat_id) is not None
@per_channel
def check_channel_txrcv_exist(self, channel_id, channel_node):
    """
    The declared TxRcv nodes exist.
    """
    with self.precondition():
        assert channel_node.find('./TxRcv') is not None
        # Every referenced waveform id must resolve to a TxWFParameters node.
        for tx_wf_id in channel_node.findall('./TxRcv/TxWFId'):
            with self.need("TxWFParameters node exists with id {}".format(tx_wf_id.text)):
                assert get_by_id(self.xml, './TxRcv/TxWFParameters', tx_wf_id.text) is not None
        # Every referenced receive id must resolve to a RcvParameters node.
        for rcv_id in channel_node.findall('./TxRcv/RcvId'):
            with self.need("RcvParameters node exists with id {}".format(rcv_id.text)):
                assert get_by_id(self.xml, './TxRcv/RcvParameters', rcv_id.text) is not None
@per_channel
def check_time_monotonic(self, channel_id, channel_node):
    """
    PVP times increase monotonically.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        for side in 'Tx', 'Rcv':
            times = pvp['{}Time'.format(side)]
            # Ignore non-finite entries; require the rest to be strictly increasing.
            finite_times = times[np.isfinite(times)]
            with self.need("{}Time is monotonic (diff > 0)".format(side)):
                assert np.all(np.diff(finite_times) > 0)
@per_channel
def check_rcv_after_tx(self, channel_id, channel_node):
    """
    RcvTime is after TxTime.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # Only compare vectors where both times are finite.
        both_finite = np.logical_and(np.isfinite(pvp['TxTime']), np.isfinite(pvp['RcvTime']))
        with self.need("Rcv after Tx"):
            assert np.all(pvp['RcvTime'][both_finite] > pvp['TxTime'][both_finite])
@per_channel
def check_rcv_finite(self, channel_id, channel_node):
    """
    RcvTime and Pos are finite.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # Receive-side PVPs must never contain NaN/Inf entries.
        with self.need("RcvTime"):
            assert np.isfinite(pvp['RcvTime']).all()
        with self.need("RcvPos"):
            assert np.isfinite(pvp['RcvPos']).all()
@per_channel
def check_channel_fxfixed(self, channel_id, channel_node):
    """
    PVP agrees with FXFixed.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # Tolerances centered on the mean; the band is "fixed" when the extremes
        # both fall within tolerance of the mean.
        fx1_tol = con.Approx(np.nanmean(pvp['FX1']))
        fx2_tol = con.Approx(np.nanmean(pvp['FX2']))
        fx1_min_max = np.array([pvp['FX1'].min(), pvp['FX1'].max()])
        fx2_min_max = np.array([pvp['FX2'].min(), pvp['FX2'].max()])
        with self.precondition():
            # FXFixed=true: FX1/FX2 must be (approximately) constant.
            assert parsers.parse_bool(channel_node.find('./FXFixed'))
            with self.need("FX1 does not change"):
                assert fx1_min_max == fx1_tol
            with self.need("FX2 does not change"):
                assert fx2_min_max == fx2_tol
        with self.precondition():
            # FXFixed=false: FX1/FX2 must actually vary (strictly for need, approximately for want).
            assert not parsers.parse_bool(channel_node.find('./FXFixed'))
            with self.need("FX1 and/or FX2 are not exactly constant"):
                assert not ((pvp['FX1'].min() == pvp['FX1'].max()) and (pvp['FX2'].min() == pvp['FX2'].max()))
            with self.want("FX1 and/or FX2 is not almost constant"):
                assert not ((fx1_min_max == fx1_tol) and (fx2_min_max == fx2_tol))
@per_channel
def check_channel_toafixed(self, channel_id, channel_node):
    """
    PVP agrees with TOAFixed.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # Mean-centered tolerances with a tight absolute tolerance for times.
        toa1_tol = con.Approx(np.nanmean(pvp['TOA1']), atol=1e-11)
        toa2_tol = con.Approx(np.nanmean(pvp['TOA2']), atol=1e-11)
        toa1_min_max = np.array([pvp['TOA1'].min(), pvp['TOA1'].max()])
        toa2_min_max = np.array([pvp['TOA2'].min(), pvp['TOA2'].max()])
        with self.precondition():
            # TOAFixed=true: TOA1/TOA2 must be (approximately) constant.
            assert parsers.parse_bool(channel_node.find('./TOAFixed'))
            with self.need("TOA1 does not change"):
                assert toa1_min_max == toa1_tol
            with self.need("TOA2 does not change"):
                assert toa2_min_max == toa2_tol
        with self.precondition():
            # TOAFixed=false: TOA1/TOA2 must actually vary.
            assert not parsers.parse_bool(channel_node.find('./TOAFixed'))
            with self.need("TOA1 and/or TOA2 are not exactly constant"):
                assert not ((pvp['TOA1'].min() == pvp['TOA1'].max()) and (pvp['TOA2'].min() == pvp['TOA2'].max()))
            with self.want("TOA1 and/or TOA2 is not almost constant"):
                assert not ((toa1_min_max == toa1_tol) and (toa2_min_max == toa2_tol))
@per_channel
def check_channel_srpfixed(self, channel_id, channel_node):
    """
    PVP agrees with SRPFixed.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.precondition():
            # SRPFixed=true: all SRPPos vectors within 1 mm of the mean position.
            assert parsers.parse_bool(channel_node.find('./SRPFixed'))
            with self.need("SRPPos is fixed"):
                assert con.Approx(np.nanmean(pvp['SRPPos'], axis=0), atol=1e-3) == pvp['SRPPos']
        with self.precondition():
            # SRPFixed=false: SRPPos must actually vary.
            assert not parsers.parse_bool(channel_node.find('./SRPFixed'))
            with self.need("SRPPos is not exactly fixed"):
                assert not np.array_equal(np.nanmin(pvp['SRPPos'], axis=0), np.nanmax(pvp['SRPPos'], axis=0))
            with self.want("SRPPos is not approximately fixed"):
                assert not (con.Approx(np.nanmean(pvp['SRPPos'], axis=0), atol=1e-3) == pvp['SRPPos'])
def check_file_fxfixed(self):
    """
    The FXFixedCPHD element matches the rest of the file.
    """
    # Per-channel declared FxC / FxBW values and mean-centered tolerances.
    fxc_vals = np.array([float(elem.text) for elem in self.xml.findall('./Channel/Parameters/FxC')])
    fxc_minmax = np.array([fxc_vals.min(), fxc_vals.max()])
    fxc_tol = con.Approx(fxc_vals.mean())
    fx_bw_vals = np.array([float(elem.text) for elem in self.xml.findall('./Channel/Parameters/FxBW')])
    fx_bw_minmax = np.array([fx_bw_vals.min(), fx_bw_vals.max()])
    fx_bw_tol = con.Approx(fx_bw_vals.mean())
    with self.precondition():
        # FXFixedCPHD=true: every channel is FXFixed and all channels share FxC/FxBW.
        assert parsers.parse_bool(self.xml.find('./Channel/FXFixedCPHD'))
        with self.need("All channels have FXFixed"):
            assert all(parsers.parse_bool(elem) for elem in self.xml.findall('./Channel/Parameters/FXFixed'))
        with self.need("All channels have same FxC"):
            assert fxc_minmax == fxc_tol
        with self.need("All channels have same FxBW"):
            assert fx_bw_minmax == fx_bw_tol
    with self.precondition():
        # FXFixedCPHD=false but every channel individually fixed: the channels
        # themselves must then differ from one another.
        assert not parsers.parse_bool(self.xml.find('./Channel/FXFixedCPHD'))
        assert all(parsers.parse_bool(elem) for elem in self.xml.findall('./Channel/Parameters/FXFixed'))
        with self.need("Channels are not the same"):
            assert not (fxc_vals.min() == fxc_vals.max() and fx_bw_vals.min() == fx_bw_vals.max())
    with self.precondition():
        # When PVPs are available, check FXFixedCPHD against the pooled FX1/FX2 PVPs.
        assert self.pvps is not None
        pvp = np.concatenate(list(self.pvps.values()))
        fx1_tol = con.Approx(np.nanmean(pvp['FX1']))
        fx2_tol = con.Approx(np.nanmean(pvp['FX2']))
        fx1_min_max = np.array([pvp['FX1'].min(), pvp['FX1'].max()])
        fx2_min_max = np.array([pvp['FX2'].min(), pvp['FX2'].max()])
        with self.precondition():
            assert parsers.parse_bool(self.xml.find('./Channel/FXFixedCPHD'))
            with self.need("FX1 does not change"):
                assert fx1_min_max == fx1_tol
            with self.need("FX2 does not change"):
                assert fx2_min_max == fx2_tol
        with self.precondition():
            assert not parsers.parse_bool(self.xml.find('./Channel/FXFixedCPHD'))
            with self.need("FX1 and/or FX2 are not exactly constant"):
                assert not ((pvp['FX1'].min() == pvp['FX1'].max()) and (pvp['FX2'].min() == pvp['FX2'].max()))
            with self.want("FX1 and/or FX2 is not almost constant"):
                assert not ((fx1_min_max == fx1_tol) and (fx2_min_max == fx2_tol))
def check_file_toafixed(self):
    """
    The TOAFixedCPHD element matches the rest of the file.
    """
    with self.precondition():
        # TOAFixedCPHD=true requires every channel to be TOAFixed.
        assert parsers.parse_bool(self.xml.find('./Channel/TOAFixedCPHD'))
        with self.need("All channels have TOAFixed"):
            assert all(parsers.parse_bool(elem) for elem in self.xml.findall('./Channel/Parameters/TOAFixed'))
    with self.precondition():
        # When PVPs are available, check TOAFixedCPHD against the pooled TOA1/TOA2 PVPs.
        assert self.pvps is not None
        pvp = np.concatenate(list(self.pvps.values()))
        toa1_tol = con.Approx(np.nanmean(pvp['TOA1']), atol=1e-11)
        toa2_tol = con.Approx(np.nanmean(pvp['TOA2']), atol=1e-11)
        toa1_min_max = np.array([pvp['TOA1'].min(), pvp['TOA1'].max()])
        toa2_min_max = np.array([pvp['TOA2'].min(), pvp['TOA2'].max()])
        with self.precondition():
            assert parsers.parse_bool(self.xml.find('./Channel/TOAFixedCPHD'))
            with self.need("TOA1 does not change"):
                assert toa1_min_max == toa1_tol
            with self.need("TOA2 does not change"):
                assert toa2_min_max == toa2_tol
        with self.precondition():
            assert not parsers.parse_bool(self.xml.find('./Channel/TOAFixedCPHD'))
            with self.need("TOA1 and/or TOA2 is not exactly constant"):
                assert not ((pvp['TOA1'].min() == pvp['TOA1'].max()) and (pvp['TOA2'].min() == pvp['TOA2'].max()))
            with self.want("TOA1 and/or TOA2 is not almost constant"):
                assert not ((toa1_min_max == toa1_tol) and (toa2_min_max == toa2_tol))
def check_file_srpfixed(self):
    """
    The SRPFixedCPHD element matches the rest of the file.
    """
    with self.precondition():
        # SRPFixedCPHD=true requires every channel to be SRPFixed.
        assert parsers.parse_bool(self.xml.find('./Channel/SRPFixedCPHD'))
        with self.need("All channels have SRPFixed"):
            assert all(parsers.parse_bool(elem) for elem in self.xml.findall('./Channel/Parameters/SRPFixed'))
    with self.precondition():
        # When PVPs are available, check SRPFixedCPHD against the pooled SRPPos PVPs.
        assert self.pvps is not None
        pvp = np.concatenate(list(self.pvps.values()))
        with self.precondition():
            assert parsers.parse_bool(self.xml.find('./Channel/SRPFixedCPHD'))
            with self.need("SRPPos is fixed"):
                assert con.Approx(np.nanmean(pvp['SRPPos'], axis=0), atol=1e-3) == pvp['SRPPos']
        with self.precondition():
            assert not parsers.parse_bool(self.xml.find('./Channel/SRPFixedCPHD'))
            with self.need("SRPPos is not exactly fixed"):
                assert not np.array_equal(np.nanmin(pvp['SRPPos'], axis=0), np.nanmax(pvp['SRPPos'], axis=0))
            with self.want("SRPPos is not approximately fixed"):
                assert not (con.Approx(np.nanmean(pvp['SRPPos'], axis=0), atol=1e-3) == pvp['SRPPos'])
@per_channel
def check_channel_signalnormal(self, channel_id, channel_node):
    """
    PVP agrees with SignalNormal.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.precondition():
            # When SignalNormal is declared, the SIGNAL PVP must be recorded.
            assert channel_node.find('./SignalNormal') is not None
            with self.need('SIGNAL PVP present'):
                assert 'SIGNAL' in pvp.dtype.names
        with self.precondition():
            assert 'SIGNAL' in pvp.dtype.names
            with self.need('SignalNormal matches SIGNAL PVPs'):
                # SignalNormal is true iff every vector's SIGNAL PVP equals 1.
                assert np.all(pvp['SIGNAL'] == 1) == parsers.parse_bool(channel_node.find('./SignalNormal'))
@per_channel
def check_channel_fxc(self, channel_id, channel_node):
    """
    PVP agrees with FxC.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.need("FxC is (max(fx2) + min(fx1)) / 2"):
            # Declared center frequency must equal the midpoint of the PVP band edges.
            declared_fxc = float(channel_node.findtext('./FxC'))
            derived_fxc = (np.nanmax(pvp['FX2']) + np.nanmin(pvp['FX1'])) / 2
            assert con.Approx(declared_fxc) == derived_fxc
@per_channel
def check_channel_fxbw(self, channel_id, channel_node):
    """
    PVP agrees with FxBW.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.need("FxBW is max(fx2) - min(fx1)"):
            # Declared bandwidth must equal the span of the PVP band edges.
            declared_fxbw = float(channel_node.findtext('./FxBW'))
            derived_fxbw = np.nanmax(pvp['FX2']) - np.nanmin(pvp['FX1'])
            assert con.Approx(declared_fxbw) == derived_fxbw
@per_channel
def check_channel_fxbwnoise(self, channel_id, channel_node):
    """
    PVP agrees with FxBWNoise.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.precondition():
            assert channel_node.find('./FxBWNoise') is not None
            # FxBWNoise is only meaningful for FX-domain products.
            with self.need("Domain is FX when FxBWNoise is provided"):
                assert self.xml.findtext('./Global/DomainType') == 'FX'
            with self.need("FxBWNoise is max(FXN2) - min(FXN1)"):
                assert (con.Approx(float(channel_node.findtext('./FxBWNoise')))
                        == np.nanmax(pvp['FXN2']) - np.nanmin(pvp['FXN1']))
@per_channel
def check_channel_toasaved(self, channel_id, channel_node):
    """
    PVP agrees with TOASaved.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        with self.need("TOASaved is max(TOA2) - min(TOA1)"):
            # Declared saved swath must equal the span of the TOA PVP edges.
            declared_toa_saved = float(channel_node.findtext('./TOASaved'))
            derived_toa_saved = np.nanmax(pvp['TOA2']) - np.nanmin(pvp['TOA1'])
            assert con.Approx(declared_toa_saved) == derived_toa_saved
@per_channel
def check_channel_toaextsaved(self, channel_id, channel_node):
    """
    PVP agrees with TOAExtSaved.
    """
    toa_ext_saved_text = channel_node.findtext('./TOAExtended/TOAExtSaved')
    has_toa_ext_saved = toa_ext_saved_text is not None
    has_toae1 = self.xml.findtext('./PVP/TOAE1') is not None
    has_toae2 = self.xml.findtext('./PVP/TOAE2') is not None
    # TOAExtSaved, TOAE1 and TOAE2 are all-or-nothing; chained equality checks
    # that all three presence flags agree.
    with self.want('TOA extended swath parameters are specified together'):
        assert has_toa_ext_saved == has_toae1 == has_toae2
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        assert has_toa_ext_saved
        assert {'TOAE1', 'TOAE2'}.issubset(pvp.dtype.fields)
        with self.need("TOAExtSaved is max(TOAE2) - min(TOAE1)"):
            assert con.Approx(float(toa_ext_saved_text)) == np.nanmax(pvp['TOAE2']) - np.nanmin(pvp['TOAE1'])
@per_channel
def check_channel_fx_osr(self, channel_id, channel_node):
    """
    FX domain vectors are sufficiently sampled
    """
    with self.precondition():
        assert self.xml.findtext('./Global/DomainType') == 'FX'
        pvp = self._get_channel_pvps(channel_id)
        # Use the extended TOA swath when available, otherwise the standard swath.
        if {'TOAE1', 'TOAE2'}.issubset(pvp.dtype.fields):
            toa_xtnt = pvp['TOAE2'] - pvp['TOAE1']
        else:
            toa_xtnt = pvp['TOA2'] - pvp['TOA1']
        # Oversample ratio of the FX-domain sampling relative to the TOA extent.
        fx_osr = 1 / (pvp['SCSS'] * toa_xtnt)
        with self.need('FX_OSR is at least 1.1'):
            assert np.nanmin(fx_osr) >= 1.1
        with self.want('FX_OSR is at least 1.2'):
            assert np.nanmin(fx_osr) >= 1.2
@per_channel
def check_channel_toa_osr(self, channel_id, channel_node):
    """
    TOA domain vectors are sufficiently sampled
    """
    with self.precondition():
        assert self.xml.findtext('./Global/DomainType') == 'TOA'
        pvp = self._get_channel_pvps(channel_id)
        # Oversample ratio of the TOA-domain sampling relative to the FX bandwidth.
        fx_bw = pvp['FX2'] - pvp['FX1']
        toa_osr = 1 / (pvp['SCSS'] * fx_bw)
        with self.need('TOA_OSR is at least 1.1'):
            assert np.nanmin(toa_osr) >= 1.1
        with self.want('TOA_OSR is at least 1.2'):
            assert np.nanmin(toa_osr) >= 1.2
@per_channel
def check_channel_global_txtime(self, channel_id, channel_node):
    """
    PVP within global TxTime1 and TxTime2.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # All per-vector transmit times must fall within the declared global timeline.
        with self.need("TxTime is greater than TxTime1"):
            assert np.nanmin(pvp['TxTime']) >= con.Approx(float(self.xml.findtext('./Global/Timeline/TxTime1')))
        with self.need("TxTime is less than TxTime2"):
            assert np.nanmax(pvp['TxTime']) <= con.Approx(float(self.xml.findtext('./Global/Timeline/TxTime2')))
@per_channel
def check_channel_global_fxminmax(self, channel_id, channel_node):
    """
    PVP within global FxMin and FxMax.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # All per-vector band edges must fall within the declared global FX band.
        with self.need("FX1 is greater than FxMin"):
            assert np.nanmin(pvp['FX1']) >= con.Approx(float(self.xml.findtext('./Global/FxBand/FxMin')))
        with self.need("FX2 is less than FxMax"):
            assert np.nanmax(pvp['FX2']) <= con.Approx(float(self.xml.findtext('./Global/FxBand/FxMax')))
@per_channel
def check_channel_global_toaswath(self, channel_id, channel_node):
    """
    PVP within global TOASwath.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        # All per-vector TOA edges must fall within the declared global swath.
        with self.need("TOA1 is greater than TOAMin"):
            assert np.nanmin(pvp['TOA1']) >= con.Approx(float(self.xml.findtext('./Global/TOASwath/TOAMin')))
        with self.need("TOA2 is less than TOAMax"):
            assert np.nanmax(pvp['TOA2']) <= con.Approx(float(self.xml.findtext('./Global/TOASwath/TOAMax')))
@per_channel
def check_channel_afdop(self, channel_id, channel_node):
    """
    aFDOP PVP is consistent with other PVPs.
    """
    def calc_rdot(pos, vel, srp):
        # Range rate: velocity projected onto the unit line-of-sight from srp to pos.
        return (vel * unit(pos - srp)).sum(axis=-1)
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        rdot_xmt_srp = calc_rdot(pvp['TxPos'], pvp['TxVel'], pvp['SRPPos'])
        rdot_rcv_srp = calc_rdot(pvp['RcvPos'], pvp['RcvVel'], pvp['SRPPos'])
        rdot_avg_srp = 0.5 * (rdot_xmt_srp + rdot_rcv_srp)
        # Expected Doppler factor from the bistatic average range rate.
        afdop_expected = rdot_avg_srp * (-2 / scipy.constants.speed_of_light)
        mask = np.logical_and(np.isfinite(afdop_expected), np.isfinite(pvp['aFDOP']))
        assert mask.any()
        assert np.count_nonzero(pvp['aFDOP'])  # CPHD advises these "may be set equal to zero for all vectors"
        with self.want("aFDOP consistent with other PVPs"):
            assert afdop_expected[mask] == con.Approx(pvp['aFDOP'][mask], atol=1e-9)
@per_channel
def check_channel_afrr1_afrr2_relative(self, channel_id, channel_node):
    """
    aFRR1 & aFRR2 PVPs are related by fx_C.

    Checks aFRR1 == fx_C * aFRR2 over the vectors where fx_C, aFRR1 and aFRR2
    are all finite.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        fx_c = 0.5 * (pvp['FX1'] + pvp['FX2'])
        # BUG FIX: np.logical_and only combines two inputs; the original call
        # passed a third array, which numpy interpreted as the `out` parameter,
        # so non-finite aFRR2 values were never excluded from the mask.
        mask = np.isfinite(fx_c) & np.isfinite(pvp['aFRR1']) & np.isfinite(pvp['aFRR2'])
        assert mask.any()
        with self.want("aFRR1 == (FX1 + FX2) * aFRR2 / 2"):
            assert pvp['aFRR1'][mask] / (fx_c[mask] * pvp['aFRR2'][mask]) == con.Approx(1)
def _get_channel_tx_lfmrates(self, channel_node):
    """
    Return the unique LFMRate values of the channel's referenced TxWFParameters
    as a float array; raises AssertionError when none are declared.
    """
    lfmrates = set()
    for wfid_node in channel_node.findall('./TxRcv/TxWFId'):
        rate_text = self.xml.findtext(f'./TxRcv/TxWFParameters[Identifier="{wfid_node.text}"]/LFMRate')
        if rate_text is not None:
            lfmrates.add(float(rate_text))
    assert lfmrates
    return np.fromiter(lfmrates, float)
@per_channel
def check_channel_afrr1(self, channel_id, channel_node):
    """
    aFRR1 is consistent with /TxRcv/TxWFParameters/LFMRate.
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        fx_c = 0.5 * (pvp['FX1'] + pvp['FX2'])
        tx_lfmrates = self._get_channel_tx_lfmrates(channel_node)
        # Invert the aFRR1 relationship to recover the implied FX rate;
        # aFRR1 may be zero, so suppress divide-by-zero warnings.
        with np.errstate(divide='ignore'):
            derived_fx_rate = fx_c * 2 / (scipy.constants.speed_of_light * pvp['aFRR1'])
        mask = np.isfinite(derived_fx_rate)
        assert mask.any()
        # Each derived rate must match at least one declared waveform LFMRate.
        derived_fx_matches_tx_lfmrates = np.isclose(derived_fx_rate[mask, np.newaxis],
                                                    tx_lfmrates[np.newaxis, :]).any(axis=1)
        inconsistent_derived_lfmrates = derived_fx_rate[mask][~derived_fx_matches_tx_lfmrates].tolist()
        with self.want(f"aFRR1 is consistent with /TxRcv/TxWFParameters/LFMRate(s): {tx_lfmrates}"):
            assert not inconsistent_derived_lfmrates
@per_channel
def check_channel_afrr2(self, channel_id, channel_node):
    """
    aFRR2 is consistent with /TxRcv/TxWFParameters/LFMRate(s).
    """
    with self.precondition():
        pvp = self._get_channel_pvps(channel_id)
        tx_lfmrates = self._get_channel_tx_lfmrates(channel_node)
        # Invert the aFRR2 relationship to recover the implied FX rate;
        # aFRR2 may be zero, so suppress divide-by-zero warnings.
        with np.errstate(divide='ignore'):
            derived_fx_rate = 2 / (scipy.constants.speed_of_light * pvp['aFRR2'])
        mask = np.isfinite(derived_fx_rate)
        assert mask.any()
        # Each derived rate must match at least one declared waveform LFMRate.
        derived_fx_matches_tx_lfmrates = np.isclose(derived_fx_rate[mask, np.newaxis],
                                                    tx_lfmrates[np.newaxis, :]).any(axis=1)
        inconsistent_derived_lfmrates = derived_fx_rate[mask][~derived_fx_matches_tx_lfmrates].tolist()
        with self.want(f"aFRR2 is consistent with /TxRcv/TxWFParameters/LFMRate(s): {tx_lfmrates}"):
            assert not inconsistent_derived_lfmrates
@per_channel
def check_channel_imagearea_polygon(self, channel_id, channel_node):
    """
    Image area polygon is simple and consistent with X1Y1 and X2Y2.
    """
    polygon_node = channel_node.find('./ImageArea/Polygon')
    with self.precondition():
        assert polygon_node is not None
        with self.precondition():
            # Geometric checks require the optional shapely dependency.
            assert have_shapely
            polygon = self.get_polygon(polygon_node, check=True)
            x1y1 = parsers.parse_xy(channel_node.find('./ImageArea/X1Y1'))
            x2y2 = parsers.parse_xy(channel_node.find('./ImageArea/X2Y2'))
            # The polygon's bounding box must match the declared corners (1 mm tolerance).
            with self.need("Polygon works with X1Y1"):
                assert polygon.min(axis=0) == con.Approx(x1y1, atol=1e-3)
            with self.need("Polygon works with X2Y2"):
                assert polygon.max(axis=0) == con.Approx(x2y2, atol=1e-3)
            with self.need("Polygon is simple"):
                assert shg.Polygon(polygon).is_simple
@per_channel
def check_channel_identifier_uniqueness(self, channel_id, channel_node):
    """
    Identifier nodes within /Channel/Parameters are unique.
    """
    # Each inner set groups paths whose identifiers share a namespace and must
    # therefore be mutually unique (currently each set holds a single path).
    identifier_sets = (
        {'./TxRcv/TxWFId'},
        {'./TxRcv/RcvId'},
    )
    for identifier_set in identifier_sets:
        these_identifiers = []
        for path in identifier_set:
            these_identifiers.extend(x.text for x in channel_node.findall(path))
        repeated_identifiers = _get_repeated_elements(these_identifiers)
        with self.want(f'Identifiers {identifier_set} are unique'):
            assert not repeated_identifiers
@per_channel
def check_channel_rcv_sample_rate(self, channel_id, channel_node):
    """
    /TxRcv/RcvParameters/SampleRate sufficient to support saved TOA swath.
    """
    # Use the extended saved swath when present, otherwise the standard one.
    toa_swath = float(channel_node.findtext('./TOAExtended/TOAExtSaved', np.nan))
    if np.isnan(toa_swath):
        toa_swath = float(channel_node.findtext('./TOASaved'))
    txwf_ids = {x.text for x in channel_node.findall('./TxRcv/TxWFId')}
    rcv_ids = {x.text for x in channel_node.findall('./TxRcv/RcvId')}
    with self.precondition():
        # The check is only well-defined for a single waveform / receive parameter pair.
        assert len(txwf_ids) == 1 and len(rcv_ids) == 1
        txwf_params = get_by_id(self.xml, './TxRcv/TxWFParameters', next(iter(txwf_ids)))
        rcv_params = get_by_id(self.xml, './TxRcv/RcvParameters', next(iter(rcv_ids)))
        tx_lfm_rate = float(txwf_params.findtext('./LFMRate', np.nan))
        rcv_lfm_rate = float(rcv_params.findtext('./LFMRate', np.nan))
        assert np.isfinite([tx_lfm_rate, rcv_lfm_rate]).all()
        tx_pulse_length = float(txwf_params.findtext('./PulseLength'))
        rcv_sample_rate = float(rcv_params.findtext('./SampleRate'))
        # Bandwidth the receiver must support: the residual LFM rate mismatch over
        # the pulse plus the de-ramped swath extent.
        claimed_bw = abs(tx_lfm_rate - rcv_lfm_rate) * tx_pulse_length + abs(toa_swath * rcv_lfm_rate)
        with self.need("/TxRcv/RcvParameters/SampleRate sufficient to support saved TOA swath"):
            assert claimed_bw <= con.Approx(rcv_sample_rate)
def check_global_imagearea_polygon(self):
    """
    Scene Image area polygon is simple and consistent with X1Y1 and X2Y2.
    """
    scene_coords_node = self.xml.find('./SceneCoordinates')
    polygon_node = scene_coords_node.find('./ImageArea/Polygon')
    with self.precondition():
        assert polygon_node is not None
        with self.precondition():
            # Geometric checks require the optional shapely dependency.
            assert have_shapely
            polygon = self.get_polygon(polygon_node, check=True)
            x1y1 = parsers.parse_xy(scene_coords_node.find('./ImageArea/X1Y1'))
            x2y2 = parsers.parse_xy(scene_coords_node.find('./ImageArea/X2Y2'))
            # The polygon's bounding box must match the declared corners (1 mm tolerance).
            with self.need("Polygon works with X1Y1"):
                assert polygon.min(axis=0) == con.Approx(x1y1, atol=1e-3)
            with self.need("Polygon works with X2Y2"):
                assert polygon.max(axis=0) == con.Approx(x2y2, atol=1e-3)
            with self.need("Polygon is simple"):
                assert shg.Polygon(polygon).is_simple
def get_polygon(self, polygon_node, check=False, reverse=False, parser=parsers.parse_xy):
    """
    Parse a polygon node into an (N, 2) vertex array, sorted by vertex index.

    Parameters
    ----------
    polygon_node : etree element containing indexed vertex children.
    check : bool
        When True, additionally assert index completeness, size attribute
        consistency, simplicity, and clockwise orientation.
    reverse : bool
        Swap the two coordinates of each vertex before the shapely orientation
        checks. Used for (Lat, Lon) polygons, whose clockwise convention is
        defined in (Lon, Lat)=(x, y) axis order. The returned array is in the
        original coordinate order regardless.
    parser : callable mapping a vertex node to a coordinate pair.
    """
    vertex_nodes = sorted(list(polygon_node), key=lambda x: int(x.attrib['index']))
    polygon = np.asarray([parser(vertex) for vertex in vertex_nodes])
    if check:
        with self.need("Polygon indices are all present"):
            assert [int(x.attrib['index']) for x in vertex_nodes] == list(range(1, len(vertex_nodes) + 1))
        if 'size' in polygon_node.attrib:
            size = int(polygon_node.attrib['size'])
            with self.need("Polygon size attribute matches the number of vertices"):
                assert size == len(vertex_nodes)
        # BUG FIX: `reverse` was previously accepted (and passed by the lat/lon
        # callers) but never used, so orientation was checked in the wrong axis
        # order for geodetic polygons.
        shg_polygon = shg.Polygon(polygon[:, ::-1] if reverse else polygon)
        with self.need("Polygon is simple"):
            assert shg_polygon.is_simple
        with self.need("Polygon is clockwise"):
            assert not shg_polygon.exterior.is_ccw
    return polygon
def check_geoinfo_polygons(self):
    """
    GeoInfo polygons are simple polygons in clockwise order.
    """
    geo_polygons = self.xml.findall('.//GeoInfo/Polygon')
    with self.precondition():
        assert geo_polygons
        with self.precondition():
            # Geometric checks require the optional shapely dependency.
            assert have_shapely
            for geo_polygon in geo_polygons:
                # Label the check with the XPath of the polygon under test.
                with self.need(etree.ElementTree(self.xml).getpath(geo_polygon)):
                    self.get_polygon(geo_polygon, check=True, reverse=True, parser=parsers.parse_ll)
def check_image_area_corner_points(self):
    """
    The corner points represent a simple quadrilateral in clockwise order.
    """
    with self.precondition():
        # Geometric checks require the optional shapely dependency.
        assert have_shapely
        iacp_node = self.xml.find('./SceneCoordinates/ImageAreaCornerPoints')
        # get_polygon(check=True) asserts index completeness, simplicity and orientation.
        iacp = self.get_polygon(iacp_node, check=True, reverse=True, parser=parsers.parse_ll)
        with self.need("4 corner points"):
            assert len(iacp) == 4
def check_extended_imagearea_polygon(self):
    """
    Scene extended area polygon is simple and consistent with X1Y1 and X2Y2.
    """
    scene_coords_node = self.xml.find('./SceneCoordinates')
    extended_area_node = scene_coords_node.find('./ExtendedArea')
    with self.precondition():
        assert extended_area_node is not None
        extended_area_polygon_node = extended_area_node.find('./Polygon')
        with self.precondition():
            assert extended_area_polygon_node is not None
            with self.precondition():
                # Geometric checks require the optional shapely dependency.
                assert have_shapely
                extended_area_polygon = self.get_polygon(extended_area_polygon_node, check=True)
                extended_x1y1 = parsers.parse_xy(extended_area_node.find('./X1Y1'))
                extended_x2y2 = parsers.parse_xy(extended_area_node.find('./X2Y2'))
                # The polygon's bounding box must match the declared corners (1 mm tolerance).
                with self.need("Polygon works with X1Y1"):
                    assert extended_area_polygon.min(axis=0) == con.Approx(extended_x1y1, atol=1e-3)
                with self.need("Polygon works with X2Y2"):
                    assert extended_area_polygon.max(axis=0) == con.Approx(extended_x2y2, atol=1e-3)
                polygon_node = scene_coords_node.find('./ImageArea/Polygon')
                with self.precondition():
                    assert polygon_node is not None
                    polygon = self.get_polygon(polygon_node)
                    shg_extended = shg.Polygon(extended_area_polygon)
                    shg_polygon = shg.Polygon(polygon)
                    # The extended area must be a superset of the image area.
                    with self.need("Extended area polygon covers image area polygon"):
                        assert shg.Polygon(shg_extended).covers(shg_polygon)
@per_channel
def check_channel_imagearea_x1y1(self, channel_id, channel_node):
    """
    Image area X1Y1 and X2Y2 work with global X1Y1 and X2Y2.
    """
    with self.precondition():
        assert channel_node.find('./ImageArea') is not None
        x1y1 = parsers.parse_xy(channel_node.find('./ImageArea/X1Y1'))
        x2y2 = parsers.parse_xy(channel_node.find('./ImageArea/X2Y2'))
        # The channel's corners must be properly ordered.
        with self.need("Channel/Parameters/ImageArea/X1Y1 < Channel/Parameters/ImageArea/X2Y2"):
            assert x1y1[0] < x2y2[0]
            assert x1y1[1] < x2y2[1]
        # And the channel's image area must lie within the scene-level image area.
        global_x1y1 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X1Y1'))
        global_x2y2 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X2Y2'))
        with self.need("Channel/Parameters/ImageArea/X1Y1 bounded by SceneCoordinates/ImageArea/X1Y1"):
            assert x1y1 >= con.Approx(global_x1y1)
        with self.need("Channel/Parameters/ImageArea/X2Y2 bounded by SceneCoordinates/ImageArea/X2Y2"):
            assert x2y2 <= con.Approx(global_x2y2)
def check_imagearea_x1y1_x2y2(self):
"""
SceneCoordinates/ImageArea is self-consistent.
"""
x1, y1 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X1Y1'))
x2, y2 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X2Y2'))
with self.need("SceneCoordinates/ImageArea/X1Y1 < SceneCoordinates/ImageArea/X2Y2"):
assert x1 < x2
assert y1 < y2
    def check_extended_imagearea_x1y1_x2y2(self):
        """
        Extended image area contains the image area.
        """
        with self.precondition():
            assert self.xml.find('./SceneCoordinates/ExtendedArea') is not None
            extended_x1y1 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ExtendedArea/X1Y1'))
            extended_x2y2 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ExtendedArea/X2Y2'))
            # the extended area corners must themselves be properly ordered
            with self.need("SceneCoordinates/ExtendedArea/X1Y1 < SceneCoordinates/ExtendedArea/X2Y2"):
                assert extended_x1y1[0] < extended_x2y2[0]
                assert extended_x1y1[1] < extended_x2y2[1]
            global_x1y1 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X1Y1'))
            global_x2y2 = parsers.parse_xy(self.xml.find('./SceneCoordinates/ImageArea/X2Y2'))
            # the extended area must be a superset of the image area in both coordinates
            with self.need("Extended X1Y1 less than image area X1Y1"):
                assert extended_x1y1 <= con.Approx(global_x1y1)
            with self.need("Extended X2Y2 geater than image area X2Y2"):
                assert extended_x2y2 >= con.Approx(global_x2y2)
    @per_channel
    def check_channel_signal_data(self, channel_id, channel_node):
        """
        Sample data is all finite.

        Parameters
        ----------
        channel_id : str
            Identifier of the channel under test.
        channel_node : lxml.etree.ElementTree.Element
            The /Channel/Parameters element for this channel.
        """
        with self.precondition():
            assert self.header is not None
            format_string = self.xml.findtext('./Data/SignalArrayFormat')
            signal_dtype = cphd1_utils.binary_format_string_to_dtype(format_string)
            channel_data_node = get_by_id(self.xml, './Data/Channel', channel_id)
            signal_offset = int(channel_data_node.findtext('./SignalArrayByteOffset'))
            num_vectors = int(channel_data_node.findtext('./NumVectors'))
            num_samples = int(channel_data_node.findtext('./NumSamples'))
            # byte offset of the last sample of this channel relative to the signal block
            signal_end = signal_offset + num_vectors * num_samples * signal_dtype.itemsize
            signal_file_offset = self.header['SIGNAL_BLOCK_BYTE_OFFSET'] + signal_offset
            with self.need("Channel signal fits in signal block"):
                assert self.header['SIGNAL_BLOCK_SIZE'] >= signal_end
            # optional (expensive) pass over the actual sample data; only the 'CF8'
            # format is handled here
            with self.precondition():
                assert self.check_signal_data
                assert self.filename is not None
                assert format_string == 'CF8'
                with self.need("All signal samples are finite and not NaN"):
                    # memmap with explicit big-endian ('B') byte order to avoid loading the block
                    assert np.all(np.isfinite(np.memmap(self.filename, signal_dtype.newbyteorder('B'), mode='r',
                                                        offset=signal_file_offset,
                                                        shape=(num_vectors, num_samples),
                                                        order='C')))
    @per_channel
    def check_channel_normal_signal_pvp(self, channel_id, channel_node):
        """SIGNAL PVP = 1 for at least half of the vectors."""
        with self.precondition():
            pvp = self._get_channel_pvps(channel_id)
            assert 'SIGNAL' in pvp.dtype.fields
            # count vectors flagged as "normal" signal
            num_normal = np.count_nonzero(pvp['SIGNAL'] == 1)
            with self.want("SIGNAL PVP = 1 for at least half of the vectors"):
                assert num_normal / pvp.size >= 0.5
def check_image_grid_exists(self):
"""
Verify that the ImageGrid is defined
"""
with self.precondition():
with self.want("It is recommended to populate SceneCoordinates.ImageGrid for processing purposes"):
assert self.xml.find('./SceneCoordinates/ImageGrid') is not None
    def check_pad_header_xml(self):
        """
        The pad between the header and XML is 0.
        """
        with self.precondition():
            assert self.header is not None
            # guard against reading an absurd amount of the file into memory below
            with self.want("XML appears early in the file"):
                assert self.header["XML_BLOCK_BYTE_OFFSET"] < 2**28
            assert self.filename is not None
            with open(self.filename, 'rb') as fp:
                before_xml = fp.read(self.header['XML_BLOCK_BYTE_OFFSET'])
            # the file header is terminated by a form-feed/newline pair
            first_form_feed = before_xml.find('\f\n'.encode('utf-8'))
            with self.need("header section terminator exists before XML"):
                assert b'\f\n' in before_xml
            with self.want("Pad is 0"):
                assert np.all(np.frombuffer(before_xml[first_form_feed+2:], dtype=np.uint8) == 0)
    def check_pad_after_xml(self):
        """
        The pad after XML is 0.
        """
        with self.precondition():
            assert self.header is not None
            assert self.filename is not None
            xml_end = self.header['XML_BLOCK_BYTE_OFFSET'] + self.header['XML_BLOCK_SIZE']
            # the next block is the support block when present, otherwise the PVP block
            if 'SUPPORT_BLOCK_BYTE_OFFSET' in self.header:
                num_bytes_after_xml = self.header['SUPPORT_BLOCK_BYTE_OFFSET'] - xml_end
                next_block = 'Support'
            else:
                num_bytes_after_xml = self.header['PVP_BLOCK_BYTE_OFFSET'] - xml_end
                next_block = 'PVP'
            with self.need("{} comes after XML".format(next_block)):
                # 2 bytes are reserved for the section terminator
                assert num_bytes_after_xml - 2 >= 0
            bytes_after_xml = np.memmap(self.filename, np.uint8, mode='r', offset=xml_end, shape=num_bytes_after_xml)
            with self.need("Section terminator exists"):
                assert bytes_after_xml[:2].tobytes() == b'\f\n'
            with self.want("Pad is 0"):
                assert np.all(np.frombuffer(bytes_after_xml[2:], dtype=np.uint8) == 0)
    def check_pad_after_support(self):
        """
        The pad after support arrays is 0.
        """
        with self.precondition():
            assert self.header is not None
            assert self.filename is not None
            assert 'SUPPORT_BLOCK_BYTE_OFFSET' in self.header
            support_end = self.header['SUPPORT_BLOCK_BYTE_OFFSET'] + self.header['SUPPORT_BLOCK_SIZE']
            num_bytes_after_support = self.header['PVP_BLOCK_BYTE_OFFSET'] - support_end
            with self.need("PVP comes after Support"):
                assert num_bytes_after_support >= 0
            # no section terminator here; the gap should be entirely zero fill
            bytes_after_support = np.memmap(self.filename, np.uint8, mode='r', offset=support_end,
                                            shape=num_bytes_after_support)
            with self.want("Pad is 0"):
                assert np.all(np.frombuffer(bytes_after_support, dtype=np.uint8) == 0)
    def check_pad_after_pvp(self):
        """
        The pad after PVPs is 0.
        """
        with self.precondition():
            assert self.header is not None
            assert self.filename is not None
            pvp_end = self.header['PVP_BLOCK_BYTE_OFFSET'] + self.header['PVP_BLOCK_SIZE']
            num_bytes_after_pvp = self.header['SIGNAL_BLOCK_BYTE_OFFSET'] - pvp_end
            with self.need("Signal comes after PVP"):
                assert num_bytes_after_pvp >= 0
            # no section terminator here; the gap should be entirely zero fill
            bytes_after_pvp = np.memmap(self.filename, np.uint8, mode='r', offset=pvp_end,
                                        shape=num_bytes_after_pvp)
            with self.want("Pad is 0"):
                assert np.all(np.frombuffer(bytes_after_pvp, dtype=np.uint8) == 0)
def check_signal_at_end_of_file(self):
"""
Signal is at the end of the file.
"""
with self.precondition():
assert self.header is not None
assert self.filename is not None
with self.need("Signal is at the end of the file"):
file_size = os.stat(self.filename).st_size
assert file_size == self.header['SIGNAL_BLOCK_BYTE_OFFSET'] + self.header['SIGNAL_BLOCK_SIZE']
def check_scene_plane_axis_vectors(self):
"""
Scene plane axis vectors are orthonormal.
"""
planar_node = self.xml.find('./SceneCoordinates/ReferenceSurface/Planar')
with self.precondition():
assert planar_node is not None
uiax = parsers.parse_xyz(planar_node.find('./uIAX'))
uiay = parsers.parse_xyz(planar_node.find('./uIAY'))
with self.need("uIAX is unit"):
assert np.linalg.norm(uiax) == con.Approx(1)
with self.need("uIAY is unit"):
assert np.linalg.norm(uiay) == con.Approx(1)
with self.need("uIAX and uIAY are orthogonal (dot is zero)"):
assert np.dot(uiax, uiay) == con.Approx(0, atol=1e-6)
def check_global_txtime_limits(self):
"""
The Global TxTime1 and TxTime2 match the PVPs.
"""
with self.precondition():
assert self.pvps is not None
txtime1_chan = min(np.nanmin(x['TxTime']) for x in self.pvps.values())
txtime2_chan = max(np.nanmax(x['TxTime']) for x in self.pvps.values())
with self.need("Timeline TxTime1 matches PVP"):
assert txtime1_chan == con.Approx(float(self.xml.findtext('./Global/Timeline/TxTime1')))
with self.need("Timeline TxTime2 matches PVP"):
assert txtime2_chan == con.Approx(float(self.xml.findtext('./Global/Timeline/TxTime2')))
def check_global_fx_band(self):
"""
The Global FXBand matches the PVPs.
"""
with self.precondition():
assert self.pvps is not None
fx1min_chan = min(np.nanmin(x['FX1']) for x in self.pvps.values())
fx2max_chan = max(np.nanmax(x['FX2']) for x in self.pvps.values())
with self.need("FxMin matches PVP"):
assert fx1min_chan == con.Approx(float(self.xml.findtext('./Global/FxBand/FxMin')))
with self.need("FxMax match PVP"):
assert fx2max_chan == con.Approx(float(self.xml.findtext('./Global/FxBand/FxMax')))
def check_global_toaswath(self):
"""
The Global TOASwath matches the PVPs.
"""
with self.precondition():
assert self.pvps is not None
toa1min_chan = min(np.nanmin(x['TOA1']) for x in self.pvps.values())
toa2max_chan = max(np.nanmax(x['TOA2']) for x in self.pvps.values())
with self.need("TOAMin matches PVP"):
assert toa1min_chan == pytest.approx(float(self.xml.findtext('./Global/TOASwath/TOAMin')))
with self.need("TOAMax matches PVP"):
assert toa2max_chan == pytest.approx(float(self.xml.findtext('./Global/TOASwath/TOAMax')))
def _check_ids_in_channel_for_optional_branch(self, branch_name):
with self.precondition():
assert self.xml.find('./{}'.format(branch_name)) is not None
with self.want("{} present in /Channel/Parameters".format(branch_name)):
assert self.xml.find('./Channel/Parameters/{}'.format(branch_name)) is not None
    def check_antenna_ids_in_channel(self):
        """
        If the Antenna branch exists, then Antenna is also present in /Channel/Parameters
        """
        # delegate to the shared optional-branch helper
        self._check_ids_in_channel_for_optional_branch('Antenna')
    def check_txrcv_ids_in_channel(self):
        """
        If the TxRcv branch exists, then TxRcv is also present in /Channel/Parameters
        """
        # delegate to the shared optional-branch helper
        self._check_ids_in_channel_for_optional_branch('TxRcv')
    def _check_refgeom_parameters(self, xml_node, expected_parameters):
        # Compare each expected reference-geometry value against the XML under `xml_node`.
        # `expected_parameters` maps relative XML paths to expected scalar or 3-vector values.
        for xml_path, expected_value in expected_parameters.items():
            # 3-element arrays are XYZ nodes; everything else is scalar text
            if isinstance(expected_value, np.ndarray) and expected_value.size == 3:
                parser = parsers.parse_xyz
            else:
                parser = parsers.parse_text
            # tolerance depends on the kind of quantity being compared
            approx_args = {}
            if 'Angle' in xml_path:
                approx_args['atol'] = 1
            elif xml_path.endswith('Time'):
                approx_args['atol'] = 1e-6
            elif xml_path == 'ARPPos':
                approx_args['atol'] = 1e-2
            elif xml_path == 'ARPVel':
                approx_args['atol'] = 1e-3
            actual_value = parser(xml_node.find(f'./{xml_path}'))
            # only numeric values are compared approximately; strings compare exactly
            if issubclass(np.asarray(expected_value).dtype.type, numbers.Number):
                actual_value = con.Approx(actual_value, **approx_args)
            with self.need(f'{xml_path} matches defined PVP/calculation'):
                assert np.all(expected_value == actual_value)
def check_refgeom_root(self):
"""
The ReferenceGeometry branch root parameters match the PVPs/defined calculations
"""
with self.precondition():
assert self.pvps is not None
refgeom = calc_refgeom_parameters(self.xml, self.pvps).refgeom
self._check_refgeom_parameters(self.xml.find('./ReferenceGeometry'), refgeom)
def check_refgeom_monostatic(self):
"""
The ReferenceGeometry branch Monostatic parameters are present and match the PVPs/defined calculations
"""
with self.precondition():
assert self.xml.findtext('./CollectionID/CollectType') == 'MONOSTATIC'
refgeom_mono = self.xml.find('./ReferenceGeometry/Monostatic')
with self.need("ReferenceGeometry type matches CollectType"):
assert refgeom_mono is not None
assert self.pvps is not None
monostat = calc_refgeom_parameters(self.xml, self.pvps).monostat
self._check_refgeom_parameters(refgeom_mono, monostat)
def check_refgeom_bistatic(self):
"""
The ReferenceGeometry branch Bistatic parameters are present and match the PVPs/defined calculations
"""
with self.precondition():
assert self.xml.findtext('./CollectionID/CollectType') == 'BISTATIC'
refgeom_bistat = self.xml.find('./ReferenceGeometry/Bistatic')
with self.need("ReferenceGeometry type matches CollectType"):
assert refgeom_bistat is not None
assert self.pvps is not None
bistat = calc_refgeom_parameters(self.xml, self.pvps).bistat
self._check_refgeom_parameters(refgeom_bistat, bistat)
    def check_unconnected_ids(self):
        """
        Check that all identifiers are connected back to the Data branch.
        """
        with self.precondition():
            assert have_networkx
            id_graph = make_id_graph(self.xml)
            # nodes reachable from the 'Data' root are identifiers backed by file data
            data_subgraph = id_graph.subgraph(nx.shortest_path(id_graph, 'Data'))
            no_data_subgraph = id_graph.copy()
            no_data_subgraph.remove_nodes_from(data_subgraph)
            # identifier nodes are named '{path}<{id}'; branch-root nodes contain no '<'
            unconnected_ids = [] if no_data_subgraph is None else [x for x in no_data_subgraph.nodes if '<' in x]
            with self.want("All IDs connect to Data branch"):
                assert not unconnected_ids
    def check_identifier_uniqueness(self):
        """
        Identifier nodes are unique.
        """
        # each set below is one identifier namespace; paths grouped into the same
        # set (the SupportArray types) must be unique across each other
        identifier_sets = (
            {'./Antenna/AntCoordFrame/Identifier'},
            {'./Antenna/AntPattern/Identifier'},
            {'./Antenna/AntPhaseCenter/Identifier'},
            {'./Channel/Parameters/Identifier'},
            {'./Data/Channel/Identifier'},
            {'./Data/SupportArray/Identifier'},
            {'./Dwell/CODTime/Identifier'},
            {'./Dwell/DwellTime/Identifier'},
            {'./SceneCoordinates/ImageGrid/SegmentList/Segment/Identifier'},
            {'./TxRcv/RcvParameters/Identifier'},
            {'./TxRcv/TxWFParameters/Identifier'},
            {f'./SupportArray/{sa_type}/Identifier' for sa_type in ('IAZArray', 'AntGainPhase', 'AddedSupportArray')},
        )
        for identifier_set in identifier_sets:
            these_identifiers = []
            for path in identifier_set:
                these_identifiers.extend(x.text for x in self.xml.findall(path))
            repeated_identifiers = _get_repeated_elements(these_identifiers)
            with self.need(f'Identifiers {identifier_set} are unique'):
                assert not repeated_identifiers
    def check_polynomials(self):
        """
        Polynomial types are correctly specified.
        """
        def check_poly(poly_elem):
            # validate one 1-D or 2-D polynomial element against its declared order(s)
            path = poly_elem.getroottree().getpath(poly_elem)
            order_by_dim = {dim: int(poly_elem.get(f'order{dim}'))
                            for dim in (1, 2) if poly_elem.get(f'order{dim}') is not None}
            coef_exponents = [tuple(int(coef.get(f'exponent{dim}')) for dim in order_by_dim)
                              for coef in poly_elem.findall('./Coef')]
            repeated_coef_exponents = _get_repeated_elements(coef_exponents)
            with self.need(f'{path} is correctly specified'):
                for index, order in enumerate(order_by_dim.values()):
                    # no coefficient exponent may exceed the declared order in its dimension
                    dim_coefs_above_order = [coef_exp[index] for coef_exp in coef_exponents if coef_exp[index] > order]
                    assert not dim_coefs_above_order
                assert not repeated_coef_exponents
        # all polynomial locations in the CPHD XML that this check covers
        poly_paths = itertools.chain(
            [f'./Antenna/AntPattern/{j}/{k}Poly' for j, k in itertools.product(('Array', 'Element'),
                                                                              ('Gain', 'Phase'))],
            [f'./Antenna/AntCoordFrame/{axis}AxisPoly/{comp}' for axis, comp in itertools.product('XY', 'XYZ')],
            ['./Antenna/AntPattern/GainBSPoly'],
            [f'./Antenna/AntPattern/EB/DC{ax}Poly' for ax in 'XY'],
            [f'./Dwell/{x}Time/{x}TimePoly' for x in ('COD', 'Dwell')],
        )
        for element_path in poly_paths:
            for poly in self.xml.findall(element_path):
                check_poly(poly)
def check_optional_pvps_fx(self):
"""
FXN1 & FXN2 PVPs are included appropriately.
"""
is_fx_domain = self.xml.findtext('./Global/DomainType') == 'FX'
has_fxn1 = self.xml.findtext('./PVP/FXN1') is not None
has_fxn2 = self.xml.findtext('./PVP/FXN2') is not None
with self.need('FXN1/FXN2 only allowed when /Global/DomainType = FX and must be included together'):
assert not(has_fxn1 or has_fxn2) or (is_fx_domain and has_fxn1 and has_fxn2)
def check_optional_pvps_toa(self):
"""
TOAE1 & TOAE2 PVPs are included appropriately.
"""
has_toae1 = self.xml.findtext('./PVP/TOAE1') is not None
has_toae2 = self.xml.findtext('./PVP/TOAE2') is not None
with self.need('TOAE1/TOAE2 must be included together'):
assert has_toae1 == has_toae2
def _get_repeated_elements(items):
return [x for x, count in collections.Counter(items).items() if count > 1]
def unit(vec, axis=-1):
    """Normalize `vec` to unit length along `axis` (keepdims so broadcasting works)."""
    magnitude = np.linalg.norm(vec, axis=axis, keepdims=True)
    return vec / magnitude
def calc_refgeom_parameters(xml, pvps):
    """
    Calculate expected reference geometry parameters given CPHD XML and PVPs (CPHD1.0.1, Sec 6.5)

    Parameters
    ----------
    xml : lxml.etree.ElementTree.Element
        Root CPHD XML node.
    pvps : dict
        Mapping of channel identifier to that channel's PVP array.

    Returns
    -------
    refgeom_params : collections.namedtuple
        Fields ``refgeom``, ``monostat``, ``bistat``; each a dict mapping a
        relative XML path under /ReferenceGeometry to its expected value.
        Numbered comments below refer to the steps in the corresponding
        specification section.
    """
    # 6.5.1 - Reference Vector Parameters
    ref_id = xml.findtext('./Channel/RefChId')
    ref_chan_parameters = get_by_id(xml, './Channel/Parameters/', ref_id)
    v_ch_ref = int(ref_chan_parameters.findtext('RefVectorIndex'))
    ref_vector = pvps[ref_id][v_ch_ref]
    txc = ref_vector['TxTime']
    xmt = ref_vector['TxPos']
    vxmt = ref_vector['TxVel']
    trc_srp = ref_vector['RcvTime']
    rcv = ref_vector['RcvPos']
    vrcv = ref_vector['RcvVel']
    srp = ref_vector['SRPPos']
    ref_dwelltimes = get_by_id(xml, './Channel/Parameters/', ref_id).find('./DwellTimes')
    ref_cod_id = ref_dwelltimes.findtext('CODId')
    ref_dwell_id = ref_dwelltimes.findtext('DwellId')
    xy2cod = parsers.parse_poly2d(get_by_id(xml, './Dwell/CODTime', ref_cod_id).find('./CODTimePoly'))
    xy2dwell = parsers.parse_poly2d(get_by_id(xml, './Dwell/DwellTime', ref_dwell_id).find('./DwellTimePoly'))
    # (1) See also Section 6.2
    srp_llh = geocoords.ecf_to_geodetic(srp, 'latlong')
    srp_lat, srp_lon = np.deg2rad(srp_llh[:2])
    ref_surface = xml.find('./SceneCoordinates/ReferenceSurface/Planar')
    if ref_surface is None: # TODO: Add HAE
        raise NotImplementedError("Non-Planar reference surfaces (e.g. HAE) are currently not supported.")
    iax = parsers.parse_xyz(ref_surface.find('./uIAX'))
    iay = parsers.parse_xyz(ref_surface.find('./uIAY'))
    iarp = parsers.parse_xyz(xml.find('./SceneCoordinates/IARP/ECF'))
    # project the SRP into image area coordinates
    srp_iac = np.dot([iax, iay, unit(np.cross(iax, iay))], srp - iarp)
    # (2)
    srp_dec = np.linalg.norm(srp)
    uec_srp = srp / srp_dec
    # (3) local east/north/up basis at the SRP
    ueast = np.array((-np.sin(srp_lon),
                      np.cos(srp_lon),
                      0))
    unor = np.array((-np.sin(srp_lat) * np.cos(srp_lon),
                     -np.sin(srp_lat) * np.sin(srp_lon),
                     np.cos(srp_lat)))
    uup = np.array((np.cos(srp_lat) * np.cos(srp_lon),
                    np.cos(srp_lat) * np.sin(srp_lon),
                    np.sin(srp_lat)))
    # (4)
    r_xmt_srp = np.linalg.norm(xmt - srp)
    r_rcv_srp = np.linalg.norm(rcv - srp)
    # (5)
    t_ref = txc + r_xmt_srp / (r_xmt_srp + r_rcv_srp) * (trc_srp - txc)
    # (6)
    t_cod_srp = npp.polyval2d(*srp_iac[:2], c=xy2cod)
    t_dwell_srp = npp.polyval2d(*srp_iac[:2], c=xy2dwell)
    # (7)
    refgeom = {'SRP/ECF': srp,
               'SRP/IAC': srp_iac,
               'ReferenceTime': t_ref,
               'SRPCODTime': t_cod_srp,
               'SRPDwellTime': t_dwell_srp}
    def calc_apc_parameters(position, velocity):
        """Calculate APC parameters given a position and velocity.
        Use arp/varp variable substitution for similarity with CPHD v1.0.1 Section 6.5.2
        """
        # (1)
        arp = position
        varp = velocity
        # (2)
        r_arp_srp = np.linalg.norm(arp - srp)
        uarp = (arp - srp) / r_arp_srp
        rdot_arp_srp = np.dot(uarp, varp)
        # (3)
        arp_dec = np.linalg.norm(arp)
        uec_arp = arp / arp_dec
        # (4)
        ea_arp = np.arccos(np.dot(uec_arp, uec_srp))
        rg_arp_srp = srp_dec * ea_arp
        # (5)
        varp_m = np.linalg.norm(varp)
        uvarp = varp / varp_m
        left = np.cross(uec_arp, uvarp)
        # (6)
        look = +1 if np.dot(left, uarp) < 0 else -1
        side_of_track = 'L' if look == +1 else 'R'
        # (7)
        dca = np.arccos(-rdot_arp_srp / varp_m)
        # (8)
        ugpz = uup
        gpy = np.cross(uup, uarp)
        ugpy = unit(gpy)
        ugpx = np.cross(ugpy, ugpz)
        # (9)
        graz = np.arccos(np.dot(uarp, ugpx))
        # incidence angle in (15)
        # (10)
        gpx_n = np.dot(ugpx, unor)
        gpx_e = np.dot(ugpx, ueast)
        azim = np.arctan2(gpx_e, gpx_n)
        # (11)
        uspn = unit(look * np.cross(uarp, uvarp))
        # (12)
        twst = -np.arcsin(np.dot(uspn, ugpy))
        # (13)
        slope = np.arccos(np.dot(ugpz, uspn))
        # (14)
        lodir_n = np.dot(-uspn, unor)
        lodir_e = np.dot(-uspn, ueast)
        lo_ang = np.arctan2(lodir_e, lodir_n)
        # (15)
        return {'ARPPos': arp,
                'ARPVel': varp,
                'SideOfTrack': side_of_track,
                'SlantRange': r_arp_srp,
                'GroundRange': rg_arp_srp,
                'DopplerConeAngle': np.rad2deg(dca),
                'GrazeAngle': np.rad2deg(graz),
                'IncidenceAngle': 90 - np.rad2deg(graz),
                'AzimuthAngle': np.rad2deg(azim) % 360,
                'TwistAngle': np.rad2deg(twst),
                'SlopeAngle': np.rad2deg(slope),
                'LayoverAngle': np.rad2deg(lo_ang) % 360}
    def calc_apc_parameters_bi(platform, time, position, velocity):
        # platform-specific ('Tx' or 'Rcv') APC parameters for the bistatic branch
        apc_params = calc_apc_parameters(position, velocity)
        apc_params['Time'] = time
        apc_params['Pos'] = apc_params.pop('ARPPos')
        apc_params['Vel'] = apc_params.pop('ARPVel')
        del apc_params['TwistAngle']
        del apc_params['SlopeAngle']
        del apc_params['LayoverAngle']
        # Conditions unique to bistatic (6.5.3 18-19)
        if np.linalg.norm(velocity) == 0:
            apc_params['DopplerConeAngle'] = 90
            apc_params['SideOfTrack'] = 'L'
        if apc_params['GroundRange'] == 0:
            apc_params['GrazeAngle'] = 90
            apc_params['IncidenceAngle'] = 0
            apc_params['AzimuthAngle'] = 0
        return {'{platform}Platform/{k}'.format(platform=platform, k=k): v for k, v in apc_params.items()}
    def calc_refgeom_mono():
        # monostatic case: treat the midpoint of Tx and Rcv as the single APC
        return calc_apc_parameters((xmt + rcv) / 2, (vxmt + vrcv) / 2)
    def calc_refgeom_bi():
        # 6.5.3 Reference Geometry: Collect Type = BISTATIC
        # (1)
        uxmt = (xmt - srp) / r_xmt_srp
        rdot_xmt_srp = np.dot(uxmt, vxmt)
        uxmtdot = (vxmt - np.dot(rdot_xmt_srp, uxmt)) / r_xmt_srp
        # (2)
        urcv = (rcv - srp) / r_rcv_srp
        rdot_rcv_srp = np.dot(urcv, vrcv)
        urcvdot = (vrcv - np.dot(rdot_rcv_srp, urcv)) / r_rcv_srp
        # (3) bisector of the Tx and Rcv unit pointing vectors and its rate
        bp = (uxmt + urcv) / 2
        bpdot = (uxmtdot + urcvdot) / 2
        # (4)
        bp_mag = np.linalg.norm(bp)
        bistat_ang = 2 * np.arccos(bp_mag)
        # (5)
        bistat_ang_rate = 0.0 if bp_mag in (0, 1) else -(4 * np.dot(bp, bpdot) / np.sin(bistat_ang))
        # (6)
        ugpz = uup
        bp_gpz = np.dot(bp, ugpz)
        bp_gp = bp - np.dot(bp_gpz, ugpz)
        bp_gpx = np.linalg.norm(bp_gp)
        # (7)
        ubgpx = bp_gp / bp_gpx
        ubgpy = np.cross(ugpz, ubgpx)
        # (8)
        bistat_graz = np.arctan(bp_gpz / bp_gpx)
        # (9)
        bgpx_n = np.dot(ubgpx, unor)
        bgpx_e = np.dot(ubgpx, ueast)
        bistat_azim = np.arctan2(bgpx_e, bgpx_n)
        # (10)
        bpdot_bgpy = np.dot(bpdot, ubgpy)
        bistat_azim_rate = -(bpdot_bgpy / bp_gpx)
        # (11)
        bistat_sgn = +1 if bpdot_bgpy > 0 else -1
        # (12)
        ubp = bp / bp_mag
        bpdotp = np.dot(bpdot, ubp) * ubp
        bpdotn = bpdot - bpdotp
        # (13)
        bipn = bistat_sgn * np.cross(bp, bpdotn)
        ubipn = unit(bipn)
        # (14)
        bistat_twst = -np.arcsin(np.dot(ubipn, ubgpy))
        # (15)
        bistat_slope = np.arccos(np.dot(ugpz, ubipn))
        # (16)
        b_lodir_n = np.dot(-ubipn, unor)
        b_lodir_e = np.dot(-ubipn, ueast)
        bistat_lo_ang = np.arctan2(b_lodir_e, b_lodir_n)
        # Caveat in (6)
        if bp_gpx == 0:
            bistat_azim = 0
            bistat_azim_rate = 0
            bistat_graz = 0
            bistat_twst = 0
            bistat_slope = 0
            bistat_lo_ang = 0
        # Caveat in (10)
        if bpdot_bgpy == 0:
            bistat_twst = 0
            bistat_slope = 0
            bistat_lo_ang = 0
        refgeom_bi = {
            # (17)
            'AzimuthAngle': np.rad2deg(bistat_azim) % 360,
            'AzimuthAngleRate': np.rad2deg(bistat_azim_rate),
            'BistaticAngle': np.rad2deg(bistat_ang),
            'BistaticAngleRate': np.rad2deg(bistat_ang_rate),
            'GrazeAngle': np.rad2deg(bistat_graz),
            'TwistAngle': np.rad2deg(bistat_twst),
            'SlopeAngle': np.rad2deg(bistat_slope),
            'LayoverAngle': np.rad2deg(bistat_lo_ang) % 360
        }
        # (18)
        refgeom_bi.update(calc_apc_parameters_bi('Tx', txc, xmt, vxmt))
        # (19)
        refgeom_bi.update(calc_apc_parameters_bi('Rcv', trc_srp, rcv, vrcv))
        return refgeom_bi
    mono = calc_refgeom_mono()
    bistat = calc_refgeom_bi()
    return collections.namedtuple('refgeom_params', 'refgeom monostat bistat')(refgeom, mono, bistat)
def make_id_graph(xml):
    """
    Make an undirected graph with CPHD identifiers as nodes and edges from correspondence and hierarchy.
    Nodes are named as {xml_path}<{id}, e.g. /Data/Channel/Identifier<Ch1
    There is a single "Data" node formed from the Data branch root that signifies data that can be read from the file
    Args
    ----
    xml: `lxml.etree.ElementTree.Element`
        Root CPHD XML node
    Returns
    -------
    id_graph: `networkx.Graph`
        Undirected graph
        * nodes: Data node, CPHD identifiers
        * edges: Parent identifiers to child identifiers; corresponding identifiers across XML branches
    """
    id_graph = nx.Graph()
    def add_id_nodes_from_path(xml_path):
        # one node per identifier element found at xml_path
        id_graph.add_nodes_from(["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)])
    def add_id_nodes_from_path_with_connected_root(xml_path):
        # like add_id_nodes_from_path, but also edge each node to its branch-root node (e.g. 'Data')
        root_node = xml_path.split('/')[1]
        id_graph.add_edges_from(zip(itertools.repeat(root_node),
                                    ["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)]))
    def get_id_from_node_name(node_name):
        return node_name.split('<')[-1]
    def connect_matching_id_nodes(path_a, path_b):
        # edge together existing nodes at path_a/path_b that share the same identifier value
        all_nodes = list(id_graph.nodes)
        all_a = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_a}
        all_b = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_b}
        for k in set(all_a).intersection(all_b):
            id_graph.add_edge(all_a[k], all_b[k])
    def add_and_connect_id_nodes(path_a, path_b):
        add_id_nodes_from_path(path_a)
        add_id_nodes_from_path(path_b)
        connect_matching_id_nodes(path_a, path_b)
    def add_and_connect_children(parent_path, parent_id_name, children_paths):
        # edge a parent identifier to each of its child reference identifiers
        for parent in xml.findall('.' + parent_path):
            parent_id = parent.findtext(parent_id_name)
            for child_path in children_paths:
                for child in parent.findall('.' + child_path):
                    id_graph.add_edge('{}/{}<{}'.format(parent_path, parent_id_name, parent_id),
                                      '{}/{}<{}'.format(parent_path, child_path, child.text))
    add_id_nodes_from_path_with_connected_root('/Data/Channel/Identifier')
    add_id_nodes_from_path_with_connected_root('/Data/SupportArray/Identifier')
    channel_children = ['DwellTimes/CODId', 'DwellTimes/DwellId']
    channel_children += ['Antenna/'+ident for ident in ('TxAPCId', 'TxAPATId', 'RcvAPCId', 'RcvAPATId')]
    channel_children += ['TxRcv/TxWFId', 'TxRcv/RcvId']
    add_and_connect_children('/Channel/Parameters', 'Identifier', channel_children)
    connect_matching_id_nodes('/Data/Channel/Identifier', '/Channel/Parameters/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/IAZArray/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AntGainPhase/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AddedSupportArray/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/CODId', '/Dwell/CODTime/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/DwellId', '/Dwell/DwellTime/Identifier')
    add_and_connect_id_nodes('/Antenna/AntCoordFrame/Identifier', '/Antenna/AntPhaseCenter/ACFId')
    add_and_connect_children('/Antenna/AntPattern', 'Identifier',
                             ('GainPhaseArray/ArrayId', 'GainPhaseArray/ElementId'))
    add_and_connect_children('/Antenna/AntPhaseCenter', 'Identifier', ('ACFId',))
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPATId', '/Antenna/AntPattern/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPATId', '/Antenna/AntPattern/Identifier')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ArrayId')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ElementId')
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/TxWFId', '/TxRcv/TxWFParameters/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/RcvId', '/TxRcv/RcvParameters/Identifier')
    return id_graph
def main(args=None):
    """
    CphdConsistency CLI tool. Print results to stdout.
    Parameters
    ----------
    args: None|List[str]
        List of CLI argument strings. If None use sys.argv

    Returns
    -------
    bool
        True when at least one consistency check failed.
    """
    parser = argparse.ArgumentParser(description="Analyze a CPHD and display inconsistencies")
    parser.add_argument('cphd_or_xml')
    parser.add_argument('-v', '--verbose', default=0,
                        action='count', help="Increase verbosity (can be specified more than once >4 doesn't help)")
    parser.add_argument('--schema', help="Use a supplied schema file (attempts version-specific schema if omitted)")
    parser.add_argument('--noschema', action='append_const', const='check_against_schema', dest='ignore',
                        help="Disable schema checks")
    parser.add_argument('--signal-data', action='store_true', help="Check the signal data for NaN and +/- Inf")
    parser.add_argument('--ignore', action='append', metavar='PATTERN',
                        help=("Skip any check matching PATTERN at the beginning of its name. Can be specified more than"
                              " once."))
    config = parser.parse_args(args)
    # Some questionable abuse of the pytest internals:
    # re-compile this module's source with pytest's assertion rewriting applied so
    # the bare asserts inside the checks produce detailed failure messages
    import ast
    import _pytest.assertion.rewrite
    base, ext = os.path.splitext(__file__)  # python2 can return the '*.pyc' file
    with open(base + '.py', 'r') as fd:
        source = fd.read()
    tree = ast.parse(source)
    try:
        _pytest.assertion.rewrite.rewrite_asserts(tree)
    except TypeError as e:
        # newer pytest versions require the source as a second argument
        _pytest.assertion.rewrite.rewrite_asserts(tree, source)
    co = compile(tree, __file__, 'exec', dont_inherit=True)
    ns = {}
    exec(co, ns)
    # use the rewritten class from the freshly-executed namespace
    cphd_con = ns['CphdConsistency'].from_file(config.cphd_or_xml, config.schema, config.signal_data)
    cphd_con.check(ignore_patterns=config.ignore)
    failures = cphd_con.failures()
    cphd_con.print_result(fail_detail=config.verbose >= 1,
                          include_passed_asserts=config.verbose >= 2,
                          include_passed_checks=config.verbose >= 3,
                          skip_detail=config.verbose >= 4)
    return bool(failures)
# command-line entry point; exit status 1 when any consistency check failed
if __name__ == "__main__":  # pragma: no cover
    import sys
    sys.exit(int(main()))
| 85,361 | 42.133906 | 120 | py |
sarpy | sarpy-master/sarpy/consistency/parsers.py | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
__classification__ = "UNCLASSIFIED"
__author__ = "Nathan Bombaci, Valkyrie"
from typing import List
import numpy as np
def parse_text(elem):
    """
    Convert an element's text to the most restrictive python type possible.

    Reverse of `xml.make_elem`: `int` is attempted first, then `float`, then
    the boolean words, with `str` as the fallback.

    Parameters
    ----------
    elem: lxml.etree.ElementTree.Element
        Element whose text is converted.

    Returns
    -------
    val: int|float|bool|str
        Converted value.
    """
    raw = elem.text
    try:
        return int(raw)
    except ValueError:
        pass
    try:
        return float(raw)
    except ValueError:
        pass
    lowered = raw.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    return str(raw)
def parse_bool_text(text):
    """
    Gets a boolean from a string.

    Parameters
    ----------
    text: str
        One of `'true', '1', 'false', '0'` (case-insensitive).

    Returns
    -------
    val: bool
        Boolean value converted from `text`.

    Raises
    ------
    ValueError
        The text string is not a recognized boolean word.
    """
    lowered = text.lower()
    truth_table = {'true': True, '1': True, 'false': False, '0': False}
    if lowered in truth_table:
        return truth_table[lowered]
    raise ValueError("Cannot parse bool from {}".format(lowered))
def parse_bool(elem):
    """
    Gets a boolean from an element.

    Parameters
    ----------
    elem : lxml.etree.ElementTree.Element
        Element to convert.

    Returns
    -------
    val : bool
        Boolean value of the `elem`'s text.

    Raises
    ------
    ValueError
        The element text is not a recognized boolean word.
    """
    lowered = elem.text.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    raise ValueError("Cannot parse bool from {}".format(lowered))
def parse_sequence(node, keys, conversion=parse_text):
    """
    Reverse of `sequence_node`: parse one child element per entry of `keys`.

    Parameters
    ----------
    node : lxml.etree.ElementTree.Element
        Element containing a sequence node.
    keys : List
        Child element names to parse, in order.
    conversion : Callable
        Conversion applied to each child element. (Default: `parse_text`)

    Returns
    ------
    List
        Parsed values, one per element of `keys`.
    """
    children = (node.find('./{}'.format(key)) for key in keys)
    return [conversion(child) for child in children]
def parse_xyz(node):
    """
    Parse a node with ``'X'``, ``'Y'``, and ``'Z'`` children

    Parameters
    ----------
    node : lxml.etree.ElementTree.Element
        Element containing an XYZ sequence node.

    Returns
    -------
    List
        List [X, Y, Z] of parsed float values.
    """
    return [float(node.find('./{}'.format(axis)).text) for axis in ('X', 'Y', 'Z')]
def parse_xy(node):
    """
    Parse a node with ``'X'`` and ``'Y'`` children.

    Parameters
    ----------
    node: lxml.etree.ElementTree.Element
        Element containing an XY sequence node.

    Returns
    -------
    List
        List [X, Y] of parsed float values.
    """
    return [float(node.find('./{}'.format(axis)).text) for axis in ('X', 'Y')]
def parse_ll(node):
    """
    Parse a node with ``'Lat'`` and ``'Lon'`` children.

    Parameters
    ----------
    node: lxml.etree.ElementTree.Element
        Element containing a Lat/Lon sequence node.

    Returns
    -------
    Tuple
        (Lon, Lat) parsed values in radians.
    """
    lon_degrees = float(node.findtext('Lon'))
    lat_degrees = float(node.findtext('Lat'))
    return np.radians(lon_degrees), np.radians(lat_degrees)
def parse_llh(node):
    """
    Parse a node with ``'Lat'``, ``'Lon'``, ``'HAE'`` children.

    Parameters
    ----------
    node: lxml.etree.ElementTree.Element
        Element containing a Lat/Lon/HAE sequence node.

    Returns
    -------
    Tuple
        (Lon, Lat, HAE); Lon and Lat in radians, HAE in meters.
    """
    lon_degrees = float(node.findtext('Lon'))
    lat_degrees = float(node.findtext('Lat'))
    height = float(node.findtext('HAE'))
    return np.radians(lon_degrees), np.radians(lat_degrees), height
def parse_poly2d(node):
    """
    Parse a node with ``'exponent1'`` and ``'exponent2'`` children.

    Args
    ----
    node: `lxml.etree.ElementTree.Element`
        Element containing a poly2d node.

    Returns
    -------
    result: list of list, shape=(:, :)
        Dense coefficient grid; entry [i][j] is the coefficient for
        exponent1=i, exponent2=j (0 where no Coef element is present).
    """
    coefs = node.findall('./Coef')
    exponents = [(int(c.get('exponent1')), int(c.get('exponent2'))) for c in coefs]
    shape = (max(e1 for e1, _ in exponents) + 1,
             max(e2 for _, e2 in exponents) + 1)
    grid = np.zeros(shape, np.float64)
    for (e1, e2), coef in zip(exponents, coefs):
        grid[e1, e2] = float(coef.text)
    return grid.tolist()
| 4,465 | 21 | 91 | py |
sarpy | sarpy-master/sarpy/consistency/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/consistency/sidd_consistency.py | """
A module for performing a selection of validation checks on a SIDD (nitf) file,
or an xml file containing the sidd structure.
Use the `check_file` function directly, or perform using the command line
>>> python -m sarpy.consistency.sidd_consistency <file_name>
For more information, about command line usage, see
>>> python -m sarpy.consistency.sidd_consistency --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import sys
import argparse
import os
import re
from sarpy.consistency.sicd_consistency import check_sicd_data_extension
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, \
DataExtensionHeader0
from sarpy.io.xml.base import parse_xml_from_string, validate_xml_from_string
from sarpy.io.product.sidd_schema import get_schema_path, get_urn_details, \
get_specification_identifier
from sarpy.io.product.sidd3_elements.SIDD import SIDDType as SIDDType3
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
logger = logging.getLogger('validation')
def evaluate_xml_versus_schema(xml_string, urn_string):
    """
    Check validity of the xml string versus the appropriate schema.

    Parameters
    ----------
    xml_string : str|bytes
    urn_string : str

    Returns
    -------
    None|bool
        `None` when schema validation is unavailable (lxml not importable),
        `False` when the urn has no known schema, otherwise the validation result.
    """
    # resolve the schema path for the urn; an unknown urn is a failure
    try:
        schema_location = get_schema_path(urn_string)
    except KeyError:
        logger.exception('SIDD: Failed finding the schema for urn {}'.format(urn_string))
        return False
    # schema validation requires lxml; treat its absence as "no result"
    try:
        return validate_xml_from_string(xml_string, schema_location, output_logger=logger)
    except ImportError:
        return None
def _evaluate_xml_string_validity(xml_string=None):
    """
    Check the validity of the SIDD xml, as defined by the given string.

    Parameters
    ----------
    xml_string : str|bytes

    Returns
    -------
    is_valid : bool
    sidd_urn : str
    sidd : SIDDType1|SIDDType2|SIDDType3
    """
    root_node, xml_ns = parse_xml_from_string(xml_string)
    if 'default' not in xml_ns:
        raise ValueError(
            'Could not properly interpret the namespace collection from xml\n{}'.format(xml_ns))
    sidd_urn = xml_ns['default']
    # only attempt schema validation when the urn is recognized
    try:
        _ = get_urn_details(sidd_urn)
        check_schema = True
    except Exception:
        logger.exception('SIDD: The SIDD namespace has unrecognized value')
        check_schema = False
    valid_xml = evaluate_xml_versus_schema(xml_string, sidd_urn) if check_schema else None
    if valid_xml is None:
        # schema validation unavailable - do not count it against validity
        valid_xml = True
    # deserialize with the version-appropriate SIDD type and check its contents
    urn_to_sidd_type = {
        'urn:SIDD:1.0.0': SIDDType1,
        'urn:SIDD:2.0.0': SIDDType2,
        'urn:SIDD:3.0.0': SIDDType3}
    if sidd_urn not in urn_to_sidd_type:
        raise ValueError('Got unhandled urn {}'.format(sidd_urn))
    the_sidd = urn_to_sidd_type[sidd_urn].from_node(root_node, xml_ns=xml_ns)
    valid_sidd_contents = the_sidd.is_valid(recursive=True, stack=False)
    return valid_xml & valid_sidd_contents, sidd_urn, the_sidd
def check_sidd_data_extension(nitf_details, des_header, xml_string):
    """
    Evaluate a SIDD data extension for validity.

    Checks the xml structure itself, agreement between the DES header fields
    and the xml contents, and the plausibility of the classification markings.

    Parameters
    ----------
    nitf_details : NITFDetails
    des_header : DataExtensionHeader|DataExtensionHeader0
    xml_string : str|bytes

    Returns
    -------
    is_valid : bool
    sidd : SIDDType1|SIDDType2|SIDDType3
    """
    def check_des_header_fields():
        # type: () -> bool
        # verify that the DES header fields agree with the SIDD urn/spec details
        des_id = des_header.DESID.strip() if nitf_details.nitf_version == '02.10' else des_header.DESTAG.strip()
        if des_id != 'XML_DATA_CONTENT':
            # old-style headers carry none of the fields below, nothing more to check
            logger.warning('SIDD: Found old style SIDD DES Header. This is deprecated.')
            return True
        # make sure that the NITF urn is evaluated for sensibility
        nitf_urn = des_header.UserHeader.DESSHTN.strip()
        try:
            nitf_urn_details = get_urn_details(nitf_urn)
        except Exception:
            logger.exception('SIDD: The SIDD DES.DESSHTN must be a recognized urn')
            return False
        # make sure that the NITF urn and SIDD xml urn actually agree
        header_good = True
        if nitf_urn != xml_urn:
            logger.error('SIDD: The SIDD DES.DESSHTN ({}) and urn ({}) must agree'.format(nitf_urn, xml_urn))
            header_good = False
        # make sure that the NITF DES fields are populated appropriately for NITF urn
        if des_header.UserHeader.DESSHSI.strip() != get_specification_identifier():
            logger.error(
                'SIDD: DES.DESSHSI has value `{}`,\n\tbut should have value `{}`'.format(
                    des_header.UserHeader.DESSHSI.strip(), get_specification_identifier()))
            header_good = False
        nitf_version = nitf_urn_details['version']
        if des_header.UserHeader.DESSHSV.strip() != nitf_version:
            logger.error(
                'SIDD: DES.DESSHSV has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
                    des_header.UserHeader.DESSHSV.strip(), nitf_version, nitf_urn))
            header_good = False
        nitf_date = nitf_urn_details['date']
        if des_header.UserHeader.DESSHSD.strip() != nitf_date:
            # NB: report the DESSHSD value here (previously this message
            # erroneously reported the DESSHSV value)
            logger.warning(
                'SIDD: DES.DESSHSD has value `{}`,\n\tbut should have value `{}` based on DES.DESSHTN `{}`'.format(
                    des_header.UserHeader.DESSHSD.strip(), nitf_date, nitf_urn))
        return header_good
    def compare_sidd_class():
        # type: () -> bool
        # compare classification markings in the xml, DES header, and NITF header
        if the_sidd.ProductCreation is None or the_sidd.ProductCreation.Classification is None or \
                the_sidd.ProductCreation.Classification.classification is None:
            logger.error(
                'SIDD: SIDD.ProductCreation.Classification.classification is not populated,\n\t'
                'so can not be compared with SIDD DES.DESCLAS `{}`'.format(des_header.Security.CLAS.strip()))
            return False
        extracted_class = the_sidd.ProductCreation.Classification.classification
        # disagreements below are suspicious but not disqualifying - warn only
        if extracted_class != des_header.Security.CLAS.strip():
            logger.warning(
                'SIDD: DES.DESCLAS is `{}`,\n\tand SIDD.ProductCreation.Classification.classification '
                'is {}'.format(des_header.Security.CLAS.strip(), extracted_class))
        if des_header.Security.CLAS.strip() != nitf_details.nitf_header.Security.CLAS.strip():
            logger.warning(
                'SIDD: DES.DESCLAS is `{}`,\n\tand NITF.CLAS is `{}`'.format(
                    des_header.Security.CLAS.strip(), nitf_details.nitf_header.Security.CLAS.strip()))
        return True
    # check sidd xml structure for validity
    valid_sicd, xml_urn, the_sidd = _evaluate_xml_string_validity(xml_string)
    # check that the sidd information and header information appropriately match
    valid_header = check_des_header_fields()
    # check that the classification seems to make sense
    valid_class = compare_sidd_class()
    return valid_sicd & valid_header & valid_class, the_sidd
def check_sidd_file(nitf_details):
    """
    Check the validity of the given NITF file as a SIDD file.

    Parameters
    ----------
    nitf_details : str|NITFDetails

    Returns
    -------
    bool
    """
    def find_des():
        # scan every DES in the NITF, collecting SIDD and SICD xml extensions
        # into the enclosing-scope lists sidd_des/sicd_des
        for i in range(nitf_details.des_subheader_offsets.size):
            subhead_bytes = nitf_details.get_des_subheader_bytes(i)
            if nitf_details.nitf_version == '02.00':
                des_header = DataExtensionHeader0.from_bytes(subhead_bytes, start=0)
            elif nitf_details.nitf_version == '02.10':
                des_header = DataExtensionHeader.from_bytes(subhead_bytes, start=0)
            else:
                raise ValueError('Got unhandled NITF version {}'.format(nitf_details.nitf_version))
            if subhead_bytes.startswith(b'DEXML_DATA_CONTENT'):
                # modern-style DES - could contain either SIDD or SICD xml
                des_bytes = nitf_details.get_des_bytes(i).decode('utf-8').strip().encode()
                # noinspection PyBroadException
                try:
                    root_node, xml_ns = parse_xml_from_string(des_bytes)
                    if 'SIDD' in root_node.tag:  # namespace makes this ugly
                        sidd_des.append((i, des_bytes, root_node, xml_ns, des_header))
                    elif 'SICD' in root_node.tag:
                        sicd_des.append((i, des_bytes, root_node, xml_ns, des_header))
                except Exception:
                    # unparseable xml in a generic DES - ignore it
                    continue
            elif subhead_bytes.startswith(b'DESIDD_XML'):
                # This is an old format SIDD
                des_bytes = nitf_details.get_des_bytes(i).decode('utf-8').strip().encode()
                try:
                    root_node, xml_ns = parse_xml_from_string(des_bytes)
                    if 'SIDD' in root_node.tag:  # namespace makes this ugly
                        sidd_des.append((i, des_bytes, root_node, xml_ns, des_header))
                except Exception as e:
                    logger.exception('SIDD: Old-style SIDD DES header at index {}, but failed parsing'.format(i))
                    continue
            elif subhead_bytes.startswith(b'DESICD_XML'):
                # This is an old format SICD
                des_bytes = nitf_details.get_des_bytes(i).decode('utf-8').strip().encode()
                try:
                    root_node, xml_ns = parse_xml_from_string(des_bytes)
                    if 'SICD' in root_node.tag:  # namespace makes this ugly
                        sicd_des.append((i, des_bytes, root_node, xml_ns, des_header))
                except Exception as e:
                    logger.exception('SIDD: Old-style SICD DES header at index {}, but failed parsing'.format(i))
                    continue
    def check_image_data():
        # cross-check SAR image segments against the expectations derived from
        # each SIDD's Display.PixelType (stored in sidd_nitf_details)
        valid_images = True
        # verify that all images have the correct pixel type
        for i, img_header in enumerate(nitf_details.img_headers):
            if img_header.ICAT.strip() != 'SAR':
                continue
            iid1 = img_header.IID1.strip()
            # IID1 must look like SIDDXXXYYY, where XXX identifies the SIDD
            if re.match(r'^SIDD\d\d\d\d\d\d', iid1) is None:
                valid_images = False
                logger.error(
                    'SIDD: image segment at index {} of {} has IID1 = `{}`,\n\t'
                    'expected to be of the form `SIDDXXXYYY`'.format(i, len(nitf_details.img_headers), iid1))
                continue
            sidd_index = int(iid1[4:7])
            if not (0 < sidd_index <= len(sidd_des)):
                valid_images = False
                logger.error(
                    'SIDD: image segment at index {} of {} has IID1 = `{}`,\n\t'
                    'it is unclear with which of the {} SIDDs '
                    'this is associated'.format(i, len(nitf_details.img_headers), iid1, len(sidd_des)))
                continue
            has_image[sidd_index - 1] = True
            type_information = sidd_nitf_details[sidd_index - 1]
            pixel_type = type_information['pixel_type']
            if pixel_type is None:
                continue  # we already noted the failure here
            exp_nbpp, exp_pvtype = type_information['nbpp'], type_information['pvtype']
            if img_header.PVTYPE.strip() != exp_pvtype:
                valid_images = False
                logger.error(
                    'SIDD: image segment at index {} of {} has PVTYPE = `{}`,\n\t'
                    'expected to be `{}` based on pixel type {}'.format(
                        i, len(nitf_details.img_headers), img_header.PVTYPE.strip(), exp_pvtype, pixel_type))
            if img_header.NBPP != exp_nbpp:
                valid_images = False
                logger.error(
                    'SIDD: image segment at index {} of {} has NBPP = `{}`,\n\t'
                    'expected to be `{}` based on pixel type {}'.format(
                        i, len(nitf_details.img_headers), img_header.NBPP, exp_nbpp, pixel_type))
        # every SIDD must be associated with at least one image segment
        for sidd_index, entry in enumerate(has_image):
            if not entry:
                logger.error(
                    'SIDD: No image segments appear to be associated with the sidd at index {}'.format(sidd_index))
                valid_images = False
        return valid_images
    if isinstance(nitf_details, str):
        if not os.path.isfile(nitf_details):
            raise ValueError('Got string input, but it is not a valid path')
        nitf_details = NITFDetails(nitf_details)
    if not isinstance(nitf_details, NITFDetails):
        raise TypeError(
            'Input is expected to be a path to a NITF file, or a NITFDetails object instance')
    # shared state populated by the helper closures above
    sidd_des = []
    sicd_des = []
    sidd_nitf_details = []
    has_image = []
    find_des()
    if len(sidd_des) < 1:
        logger.error('SIDD: No SIDD DES found, this is not a valid SIDD file.')
        return False
    # validate each SIDD DES, and record the image expectations it implies
    valid_sidd_des = True
    for entry in sidd_des:
        this_sidd_valid, the_sidd = check_sidd_data_extension(nitf_details, entry[4], entry[1])
        valid_sidd_des &= this_sidd_valid
        has_image.append(False)
        if the_sidd.Display is None or the_sidd.Display.PixelType is None:
            valid_sidd_des = False
            logger.error('SIDD: SIDD.Display.PixelType is not populated, and can not be compared to NITF image details')
            sidd_nitf_details.append({'pixel_type': None})
        elif the_sidd.Display.PixelType in ['MONO8I', 'MONO8LU', 'RGB8LU']:
            sidd_nitf_details.append({'nbpp': 8, 'pvtype': 'INT', 'pixel_type': the_sidd.Display.PixelType})
        elif the_sidd.Display.PixelType == 'MONO16I':
            sidd_nitf_details.append({'nbpp': 16, 'pvtype': 'INT', 'pixel_type': the_sidd.Display.PixelType})
        elif the_sidd.Display.PixelType == 'RGB24I':
            sidd_nitf_details.append({'nbpp': 24, 'pvtype': 'INT', 'pixel_type': the_sidd.Display.PixelType})
        else:
            raise ValueError('Got unhandled pixel type {}'.format(the_sidd.Display.PixelType))
    # validate any accompanying SICD DES as well
    valid_sicd_des = True
    for entry in sicd_des:
        this_sicd_valid, _ = check_sicd_data_extension(nitf_details, entry[4], entry[1])
        valid_sicd_des &= this_sicd_valid
    valid_image = check_image_data()
    return valid_sidd_des & valid_sicd_des & valid_image
def check_file(file_name):
    """
    Check the validity for the given file SIDD (i.e. appropriately styled NITF)
    or xml file containing the SIDD structure alone.

    Parameters
    ----------
    file_name : str|NITFDetails

    Returns
    -------
    bool
    """
    if isinstance(file_name, str):
        if not os.path.isfile(file_name):
            raise ValueError('Got string input, but it is not a valid path')
        # check if this is just an xml file
        with open(file_name, 'rb') as fi:
            initial_bits = fi.read(30)
            if initial_bits.startswith(b'<?xml') or initial_bits.startswith(b'<SIDD'):
                # NB: the sniffed initial bytes are part of the document and
                # must be prepended; previously they were dropped, handing a
                # truncated xml string to the validator
                sidd_xml = (initial_bits + fi.read()).decode('utf-8')
                return _evaluate_xml_string_validity(sidd_xml)[0]
    return check_sidd_file(file_name)
if __name__ == '__main__':
    # command-line entry point: validate the given file and report via logging
    parser = argparse.ArgumentParser('SIDD Consistency')
    parser.add_argument('file_name')
    parser.add_argument(
        '-l', '--level', default='WARNING',
        choices=['INFO', 'WARNING', 'ERROR'], help="Logging level")
    config = parser.parse_args()
    logging.basicConfig(level=config.level)
    logger.setLevel(config.level)
    validity = check_file(config.file_name)
    if validity:
        logger.info('\nSIDD: {} has been validated with no errors'.format(config.file_name))
    else:
        logger.error('\nSIDD: {} has apparent errors'.format(config.file_name))
    # NOTE(review): exits with 1 when VALID and 0 when invalid, which inverts
    # the usual shell convention (0 = success) - confirm this is intentional
    sys.exit(int(validity))
sarpy | sarpy-master/sarpy/geometry/point_projection.py | """
Functions to map between the coordinates in image pixel space and geographical
coordinates.
Examples
--------
.. code-block:: python
from sarpy.geometry import point_projection
from sarpy.io.complex.sicd import SICDReader
reader = SICDReader('<path to sicd file>')
structure = reader.sicd_meta
# or reader.reader.get_sicds_as_tuple()[0]
# you can also use a SIDD structure, obtained from a product type reader
# assume that ecf_coords is some previously defined numpy array of
# shape (..., 3), with final dimension [X, Y, Z]
image_coords = point_projection.ground_to_image(ecf_coords, structure)
# image_coords will be a numpy array of shape (..., 2),
# with final dimension [row, column]
# assume that geo_coords is some previously defined numpy array of
# shape (..., 3), with final dimension [lat, lon, hae]
image_coords = point_projection.ground_to_image_geo(geo_coords, structure)
# image_coords will be a numpy array of shape (..., 2),
# with final dimension [row, column]
# assume that image_coords is some previously defined numpy array of
# shape (..., 2) with final dimension [row, column]
ecf_coords_fixed_hae = point_projection.image_to_ground(image_coords, structure, projection_type='HAE')
ecf_coords_plane = point_projection.image_to_ground(image_coords, structure, projection_type='PLANE')
geo_coords_fixed_hae = point_projection.image_to_ground_geo(image_coords, structure, projection_type='HAE')
geo_coords_plane = point_projection.image_to_ground_geo(image_coords, structure, projection_type='PLANE')
# these outputs will be numpy arrays of shape (..., 3)
# alternatively, these are also methods of the sicd/sidd structure
image_coords = structure.project_ground_to_image(ecf_coords)
image_coords = structure.project_ground_to_image_geo(geo_coords)
ecf_coords_fixed_hae = structure.project_image_to_ground(image_coords, projection_type='HAE')
ecf_coords_plane = structure.project_image_to_ground(image_coords, projection_type='PLANE')
geo_coords_fixed_hae = structure.project_image_to_ground_geo(image_coords, projection_type='HAE')
geo_coords_plane = structure.project_image_to_ground_geo(image_coords, projection_type='PLANE')
.. Note::
It should be explicitly pointed out that these methods are essentially all
inexact iterative methods which depend on convergence parameters. Changing
these parameters will yield different numerically similar, but different
results.
Under the right assumptions involving the `projection_type` parameter value
and the correct structure of the physical coordinates array, then the methods
:meth:`ground_to_image` and :meth:`image_to_ground` are **approximate**
inverses of one another. Being iterative methods, they can not generally be
made numerically exact inverses, but the tolerance can be adjusted to yield
very small differences.
.. Note::
Virtually any SIDD/SICD structure which follows the standard will have an
appropriate metadata populated to permit these projection methods.
**In the case that your structure does not have sufficient metadata populated**,
as may happen during research experimentation, an exception will be raised
with hopefully helpful details about what information is missing.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Wade Schwartzkopf")
import logging
from typing import Tuple, Union, Callable, Optional, List
from types import MethodType # for binding a method dynamically to a class
import numpy
from sarpy.geometry.geocoords import ecf_to_geodetic, geodetic_to_ecf, wgs_84_norm
from sarpy.io.complex.sicd_elements.blocks import Poly2DType, XYZPolyType
from sarpy.io.DEM.DEM import DEMInterpolator
from sarpy.io.DEM.DTED import DTEDList, DTEDInterpolator
from sarpy.io.DEM.geoid import GeoidHeight
logger = logging.getLogger(__name__)
_unhandled_text = 'Got unhandled type `{}`'
_unsupported_text = 'Got unsupported projection type `{}`'
#############
# COA Projection definition
def _validate_adj_param(
value: Union[None, numpy.ndarray, list, tuple],
name: str) -> numpy.ndarray:
"""
Validate the aperture adjustment vector parameters.
Parameters
----------
value : None|numpy.ndarray|list|tuple
name : str
Returns
-------
numpy.ndarray
"""
if value is None:
value = numpy.array([0, 0, 0], dtype='float64')
if not isinstance(value, numpy.ndarray):
value = numpy.array(value, dtype='float64')
if value.shape != (3,):
raise ValueError('{} must have shape (3, ). Got {}'.format(name, value.shape))
return value
def _ric_ecf_mat(
rarp: numpy.ndarray,
varp: numpy.ndarray,
frame_type: str) -> numpy.ndarray:
"""
Computes the ECF transformation matrix for RIC frame.
Parameters
----------
rarp : numpy.ndarray
varp : numpy.ndarray
frame_type : str
the final three characters should be one of ['ECI', 'ECF']
Returns
-------
numpy.ndarray
the RIC transform matrix (array)
"""
# Angular velocity of earth in radians/second, not including precession
w = 7292115.1467E-11
typ = frame_type.upper()[-3:]
vi = varp if typ == 'ECF' else varp + numpy.cross([0, 0, w], rarp)
r = rarp/numpy.linalg.norm(rarp)
c = numpy.cross(r, vi)
c /= numpy.linalg.norm(c) # NB: perpendicular to r
i = numpy.cross(c, r)
# this is the cross of two perpendicular normal vectors, so normal
return numpy.array([r, i, c], dtype='float64')
def _get_sicd_type_specific_projection(sicd) -> Callable:
    """
    Gets an intermediate method specific projection method with six required
    calling arguments (self, row_transform, col_transform, time_coa, arp_coa, varp_coa).

    The specific formulation (PFA, RgAzComp, INCA, or plane) is selected from
    the SICD Grid.Type and image formation algorithm.

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType

    Returns
    -------
    callable
        Maps image coordinates to (range, range-rate) relative to the aperture.
    """
    def pfa_projection():
        # projection appropriate for Polar Format Algorithm images
        SCP = sicd.GeoData.SCP.ECF.get_array()
        pfa = sicd.PFA
        polar_ang_poly = pfa.PolarAngPoly
        spatial_freq_sf_poly = pfa.SpatialFreqSFPoly
        # NB: the derivatives are computed once here, and captured by the closure
        # (a previous revision redundantly computed each of these twice)
        polar_ang_poly_der = polar_ang_poly.derivative(der_order=1, return_poly=True)
        spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)
        # noinspection PyUnusedLocal, PyIncorrectDocstring
        def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
            """
            PFA specific intermediate projection.

            Parameters
            ----------
            row_transform : numpy.ndarray
            col_transform : numpy.ndarray
            time_coa : numpy.ndarray
            arp_coa : numpy.ndarray
            varp_coa : numpy.ndarray

            Returns
            -------
            Tuple[numpy.ndarray, numpy.ndarray]
            """
            ARP_minus_SCP = arp_coa - SCP
            rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
            rDotSCPTgtCoa = numpy.sum(varp_coa * ARP_minus_SCP, axis=-1) / rSCPTgtCoa
            thetaTgtCoa = polar_ang_poly(time_coa)
            dThetaDtTgtCoa = polar_ang_poly_der(time_coa)
            # Compute polar aperture scale factor (KSF) and derivative wrt polar angle
            ksfTgtCoa = spatial_freq_sf_poly(thetaTgtCoa)
            dKsfDThetaTgtCoa = spatial_freq_sf_poly_der(thetaTgtCoa)
            # Compute spatial frequency domain phase slopes in Ka and Kc directions
            # NB: sign for the phase may be ignored as it is cancelled in a subsequent computation.
            dPhiDKaTgtCoa = row_transform * numpy.cos(thetaTgtCoa) + col_transform * numpy.sin(thetaTgtCoa)
            dPhiDKcTgtCoa = -row_transform * numpy.sin(thetaTgtCoa) + col_transform * numpy.cos(thetaTgtCoa)
            # Compute range relative to SCP
            deltaRTgtCoa = ksfTgtCoa * dPhiDKaTgtCoa
            # Compute derivative of range relative to SCP wrt polar angle.
            # Scale by derivative of polar angle wrt time.
            dDeltaRDThetaTgtCoa = dKsfDThetaTgtCoa * dPhiDKaTgtCoa + ksfTgtCoa * dPhiDKcTgtCoa
            deltaRDotTgtCoa = dDeltaRDThetaTgtCoa * dThetaDtTgtCoa
            return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
        return method_projection
    def rgazcomp_projection():
        # projection appropriate for Range-Azimuth compressed images
        SCP = sicd.GeoData.SCP.ECF.get_array()
        az_sf = sicd.RgAzComp.AzSF
        # noinspection PyUnusedLocal, PyIncorrectDocstring
        def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
            """
            RgAzComp specific intermediate projection.

            Parameters
            ----------
            row_transform : numpy.ndarray
            col_transform : numpy.ndarray
            time_coa : numpy.ndarray
            arp_coa : numpy.ndarray
            varp_coa : numpy.ndarray

            Returns
            -------
            Tuple[numpy.ndarray, numpy.ndarray]
            """
            ARP_minus_SCP = arp_coa - SCP
            rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
            rDotSCPTgtCoa = numpy.sum(varp_coa*ARP_minus_SCP, axis=-1)/rSCPTgtCoa
            deltaRTgtCoa = row_transform
            deltaRDotTgtCoa = -numpy.linalg.norm(varp_coa, axis=-1)*az_sf*col_transform
            return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
        return method_projection
    def inca_projection():
        # projection appropriate for RMA/INCA images
        inca = sicd.RMA.INCA
        r_ca_scp = inca.R_CA_SCP
        time_ca_poly = inca.TimeCAPoly
        drate_sf_poly = inca.DRateSFPoly
        # noinspection PyUnusedLocal, PyIncorrectDocstring
        def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
            """
            INCA specific intermediate projection.

            Parameters
            ----------
            row_transform : numpy.ndarray
            col_transform : numpy.ndarray
            time_coa : numpy.ndarray
            arp_coa : numpy.ndarray
            varp_coa : numpy.ndarray

            Returns
            -------
            Tuple[numpy.ndarray, numpy.ndarray]
            """
            # compute range/time of the closest approach
            R_CA_TGT = r_ca_scp + row_transform  # Range at closest approach
            t_CA_TGT = time_ca_poly(col_transform)  # Time of the closest approach
            # Compute ARP velocity magnitude (actually squared, since that's how it's used) at t_CA_TGT
            # noinspection PyProtectedMember
            VEL2_CA_TGT = numpy.sum(instance._varp_poly(t_CA_TGT)**2, axis=-1)
            # Compute the Doppler Rate Scale Factor for image Grid location
            DRSF_TGT = drate_sf_poly(row_transform, col_transform)
            # Difference between COA time and CA time
            dt_COA_TGT = time_coa - t_CA_TGT
            r_tgt_coa = numpy.sqrt(R_CA_TGT*R_CA_TGT + DRSF_TGT*VEL2_CA_TGT*dt_COA_TGT*dt_COA_TGT)
            r_dot_tgt_coa = (DRSF_TGT/r_tgt_coa)*VEL2_CA_TGT*dt_COA_TGT
            return r_tgt_coa, r_dot_tgt_coa
        return method_projection
    def plane_projection():
        # projection appropriate for plane grids - distance to the image plane point
        SCP = sicd.GeoData.SCP.ECF.get_array()
        uRow = sicd.Grid.Row.UVectECF.get_array()
        uCol = sicd.Grid.Col.UVectECF.get_array()
        # noinspection PyUnusedLocal, PyIncorrectDocstring
        def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
            """
            Plane specific intermediate projection.

            Parameters
            ----------
            row_transform : numpy.ndarray
            col_transform : numpy.ndarray
            time_coa : numpy.ndarray
            arp_coa : numpy.ndarray
            varp_coa : numpy.ndarray

            Returns
            -------
            Tuple[numpy.ndarray, numpy.ndarray]
            """
            ARP_minus_IPP = arp_coa - (SCP + numpy.outer(row_transform, uRow) + numpy.outer(col_transform, uCol))
            r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
            r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
            return r_tgt_coa, r_dot_tgt_coa
        return method_projection
    # NB: sicd.can_project_coordinates() has been called, so all required attributes
    # must be populated
    if sicd.Grid.Type == 'RGAZIM':
        if sicd.ImageFormation.ImageFormAlgo == 'PFA':
            return pfa_projection()
        elif sicd.ImageFormation.ImageFormAlgo == 'RGAZCOMP':
            return rgazcomp_projection()
    elif sicd.Grid.Type == 'RGZERO':
        return inca_projection()
    elif sicd.Grid.Type in ['XRGYCR', 'XCTYAT', 'PLANE']:
        return plane_projection()
    else:
        # NB: this will have been noted by sicd.can_project_coordinates(), but is
        # here for completeness
        raise ValueError('Unhandled Grid.Type `{}`'.format(sicd.Grid.Type))
def _get_sicd_adjustment_params(
        sicd,
        delta_arp: Union[None, numpy.ndarray, list, tuple],
        delta_varp: Union[None, numpy.ndarray, list, tuple],
        adj_params_frame: str) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Gets the SICD adjustment params, rotating them into the ECF frame when
    they are provided in a RIC frame.

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
    delta_arp : None|numpy.ndarray|list|tuple
    delta_varp : None|numpy.ndarray|list|tuple
    adj_params_frame : str

    Returns
    -------
    delta_arp: numpy.ndarray
    delta_varp: numpy.ndarray
    """
    arp_adjust = _validate_adj_param(delta_arp, 'delta_arp')
    varp_adjust = _validate_adj_param(delta_varp, 'delta_varp')
    if adj_params_frame not in ['RIC_ECI', 'RIC_ECF']:
        # already expressed in ECF - nothing further to do
        return arp_adjust, varp_adjust
    if sicd.SCPCOA.ARPPos is None or sicd.SCPCOA.ARPVel is None:
        raise ValueError(
            'The adj_params_frame is of RIC type, but one of SCPCOA.ARPPos or '
            'SCPCOA.ARPVel is not populated.')
    # rotate the RIC frame adjustments into ECF
    ric_matrix = _ric_ecf_mat(
        sicd.SCPCOA.ARPPos.get_array(), sicd.SCPCOA.ARPVel.get_array(), adj_params_frame)
    return ric_matrix.dot(arp_adjust), ric_matrix.dot(varp_adjust)
def _get_sidd_type_projection(sidd) -> Union[Poly2DType, Callable]:
    """
    Gets an intermediate method specific projection method with six required
    calling arguments (self, row_transform, col_transform, time_coa, arp_coa, varp_coa).

    Parameters
    ----------
    sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2

    Returns
    -------
    (Poly2DType, callable)
        The time COA polynomial and the projection method.
    """
    # local imports avoid a circular import at module load time
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
    def pgp(the_sidd):
        """
        Construct the plane-projection method for the given SIDD.

        Parameters
        ----------
        the_sidd : SIDDType2|SIDDType1

        Returns
        -------
        callable
        """
        plane_proj = the_sidd.Measurement.PlaneProjection
        SRP = plane_proj.ReferencePoint.ECEF.get_array()
        SRP_row = plane_proj.ReferencePoint.Point.Row
        SRP_col = plane_proj.ReferencePoint.Point.Col
        # row/col basis vectors scaled to one pixel of physical extent
        row_vector = plane_proj.ProductPlane.RowUnitVector.get_array()*plane_proj.SampleSpacing.Row
        col_vector = plane_proj.ProductPlane.ColUnitVector.get_array()*plane_proj.SampleSpacing.Col
        # noinspection PyUnusedLocal, PyIncorrectDocstring
        def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
            """
            Plane specific intermediate projection.

            Parameters
            ----------
            row_transform : numpy.ndarray
            col_transform : numpy.ndarray
            time_coa : numpy.ndarray
            arp_coa : numpy.ndarray
            varp_coa : numpy.ndarray

            Returns
            -------
            Tuple[numpy.ndarray, numpy.ndarray]
            """
            ARP_minus_IPP = arp_coa - \
                (SRP + numpy.outer(row_transform - SRP_row, row_vector) +
                 numpy.outer(col_transform - SRP_col, col_vector))
            r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
            r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
            return r_tgt_coa, r_dot_tgt_coa
        return plane_proj.TimeCOAPoly, method_projection
    if not isinstance(sidd, (SIDDType2, SIDDType1)):
        raise TypeError(_unhandled_text.format(type(sidd)))
    if sidd.Measurement.PlaneProjection is not None:
        return pgp(sidd)
    else:
        raise ValueError('Currently the only supported projection is PlaneProjection.')
def _get_sidd_adjustment_params(
        sidd,
        delta_arp: Union[None, numpy.ndarray, list, tuple],
        delta_varp: Union[None, numpy.ndarray, list, tuple],
        adj_params_frame: str) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Get the SIDD adjustment parameters, rotated into ECF when supplied in a
    RIC frame.

    Parameters
    ----------
    sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2
    delta_arp : None|numpy.ndarray|list|tuple
    delta_varp : None|numpy.ndarray|list|tuple
    adj_params_frame : str

    Returns
    -------
    delta_arp: numpy.ndarray
    delta_varp: numpy.ndarray
    """
    # local imports avoid a circular import at module load time
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
    if not isinstance(sidd, (SIDDType2, SIDDType1)):
        raise TypeError('Got sidd of unhandled type {}'.format(type(sidd)))
    delta_arp = _validate_adj_param(delta_arp, 'delta_arp')
    delta_varp = _validate_adj_param(delta_varp, 'delta_varp')
    if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:
        arp_pos_poly = sidd.Measurement.ARPPoly
        arp_vel_poly = arp_pos_poly.derivative(der_order=1, return_poly=True)
        if sidd.Measurement.PlaneProjection is not None:
            # evaluate ARP position/velocity at the reference point COA time,
            # and use them to define the RIC -> ECF rotation
            srp_row = sidd.Measurement.PlaneProjection.ReferencePoint.Point.Row
            srp_col = sidd.Measurement.PlaneProjection.ReferencePoint.Point.Col
            srp_coa_time = sidd.Measurement.PlaneProjection.TimeCOAPoly(srp_row, srp_col)
            srp_pos = arp_pos_poly(srp_coa_time)
            srp_vel = arp_vel_poly(srp_coa_time)
            ric_matrix = _ric_ecf_mat(srp_pos, srp_vel, adj_params_frame)
            delta_arp = ric_matrix.dot(delta_arp)
            delta_varp = ric_matrix.dot(delta_varp)
        else:
            raise ValueError('Got unhandled projection type {}'.format(sidd.Measurement.ProjectionType))
    return delta_arp, delta_varp
class COAProjection(object):
"""
The Center of Aperture projection object, which provides common projection
functionality for all image to R/Rdot projection. This is a helper class,
and generally not intended for direct usage.
"""
__slots__ = (
'_time_coa_poly', '_arp_poly', '_varp_poly', '_method_proj',
'_row_shift', '_row_mult', '_col_shift', '_col_mult',
'_delta_arp', '_delta_varp', '_range_bias',)
    def __init__(
            self,
            time_coa_poly: Poly2DType,
            arp_poly: XYZPolyType,
            method_projection: Callable,
            row_shift: Union[int, float] = 0,
            row_mult: Union[int, float] = 1,
            col_shift: Union[int, float] = 0,
            col_mult: Union[int, float] = 1,
            delta_arp: Union[None, numpy.ndarray, list, tuple] = None,
            delta_varp: Union[None, numpy.ndarray, list, tuple] = None,
            range_bias: Optional[float] = None):
        """
        Parameters
        ----------
        time_coa_poly : Poly2DType
            The time center of aperture polynomial.
        arp_poly : XYZPolyType
            The aperture position polynomial.
        method_projection : callable
            The method specific projection for performing the projection from image
            coordinates to R/Rdot space. The call signature is expected to be
            `method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa)`,
            where `row_transform = row_mult*(row - row_shift)`,
            `col_transform = col_mult*(col - col_shift)`,
            `time_coa = time_coa_poly(row_transform, col_transform)`,
            `arp_coa = arp_poly(time_coa)`, and `varp_coa = varp_poly(time_coa)`.
        row_shift : int|float
            The shift part of the affine row transformation for plugging into the
            time coa polynomial.
        row_mult : int|float
            The multiple part of the affine row transformation for plugging into
            the time coa polynomial.
        col_shift : int|float
            The shift part of the affine column transformation for plugging into
            the time coa polynomial.
        col_mult : int|float
            The multiple part of the affine column transformation for plugging into
            the time coa polynomial.
        delta_arp : None|numpy.ndarray|list|tuple
            ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
        delta_varp : None|numpy.ndarray|list|tuple
            VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
        range_bias : float|int
            Range bias adjustable parameter (m), defaults to 0.

        Raises
        ------
        TypeError
            If `time_coa_poly`, `arp_poly`, or `method_projection` is of the
            wrong type.
        """
        if not isinstance(time_coa_poly, Poly2DType):
            raise TypeError('time_coa_poly must be a Poly2DType instance.')
        self._time_coa_poly = time_coa_poly
        if not isinstance(arp_poly, XYZPolyType):
            raise TypeError('arp_poly must be an XYZPolyType instance.')
        self._arp_poly = arp_poly
        # velocity polynomial derived once from the position polynomial
        self._varp_poly = self._arp_poly.derivative(der_order=1, return_poly=True)  # type: XYZPolyType
        if not callable(method_projection):
            raise TypeError('method_projection must be callable.')
        # bind the projection function as a method of this instance
        self._method_proj = MethodType(method_projection, self)
        # affine transform parameters
        self._row_shift = float(row_shift)
        self._row_mult = float(row_mult)
        self._col_shift = float(col_shift)
        self._col_mult = float(col_mult)
        # aperture location adjustment parameters (validated to shape (3, ))
        self._delta_arp = _validate_adj_param(delta_arp, 'delta_arp')
        self._delta_varp = _validate_adj_param(delta_varp, 'delta_varp')
        self._range_bias = 0.0 if range_bias is None else float(range_bias)  # type: float
    @property
    def delta_arp(self) -> numpy.ndarray:
        """
        numpy.ndarray: The delta arp (ARP position offset) adjustable parameter
        """
        return self._delta_arp
@property
def delta_varp(self) -> numpy.ndarray:
    """numpy.ndarray: The VARP position offset adjustable parameter (ECF, m/s)."""
    return self._delta_varp
@property
def range_bias(self) -> float:
    """float: The range bias adjustable parameter (m)."""
    return self._range_bias
@property
def delta_range(self) -> float:
    """float: Alias for the `range_bias` adjustable parameter (m)."""
    return self._range_bias
@classmethod
def from_sicd(
        cls,
        sicd,
        delta_arp: Union[None, numpy.ndarray, list, tuple] = None,
        delta_varp: Union[None, numpy.ndarray, list, tuple] = None,
        range_bias: Optional[float] = None,
        adj_params_frame: str = 'ECF'):
    """
    Construct a COAProjection from a SICD structure.

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
        The SICD metadata structure.
    delta_arp : None|numpy.ndarray|list|tuple
        ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
    delta_varp : None|numpy.ndarray|list|tuple
        VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
    range_bias : float|int
        Range bias adjustable parameter (m), defaults to 0.
    adj_params_frame : str
        One of `('ECF', 'RIC_ECI', 'RIC_ECF')`.

    Returns
    -------
    COAProjection

    Raises
    ------
    ValueError
        If the SICD metadata is insufficient to formulate the projection.
    """

    if not sicd.can_project_coordinates():
        raise ValueError('Insufficient metadata populated to formulate projection.')

    coa_poly = sicd.Grid.TimeCOAPoly
    if coa_poly is None:
        # fall back to a constant approximation at half the collect duration
        logger.warning(
            'Using (constant) approximation to TimeCOAPoly, which may result in poor projection results.')
        coa_poly = Poly2DType(Coefs=[[sicd.Timeline.CollectDuration/2, ], ])

    # adjustable aperture location parameters, mapped into ECF as necessary
    delta_arp, delta_varp = _get_sicd_adjustment_params(
        sicd, delta_arp, delta_varp, adj_params_frame)

    # affine (row, col) -> image grid transform comes from the SICD pixel grid
    return cls(
        coa_poly, sicd.Position.ARPPoly, _get_sicd_type_specific_projection(sicd),
        row_shift=sicd.ImageData.SCPPixel.Row - sicd.ImageData.FirstRow,
        row_mult=sicd.Grid.Row.SS,
        col_shift=sicd.ImageData.SCPPixel.Col - sicd.ImageData.FirstCol,
        col_mult=sicd.Grid.Col.SS,
        delta_arp=delta_arp, delta_varp=delta_varp, range_bias=range_bias)
@classmethod
def from_sidd(
        cls,
        sidd,
        delta_arp: Union[None, numpy.ndarray, list, tuple] = None,
        delta_varp: Union[None, numpy.ndarray, list, tuple] = None,
        range_bias: Optional[float] = None,
        adj_params_frame: str = 'ECF'):
    """
    Construct a COAProjection from a SIDD structure.

    Parameters
    ----------
    sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2
    delta_arp : None|numpy.ndarray|list|tuple
        ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
    delta_varp : None|numpy.ndarray|list|tuple
        VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
    range_bias : float|int
        Range bias adjustable parameter (m), defaults to 0.
    adj_params_frame : str
        One of `('ECF', 'RIC_ECI', 'RIC_ECF')`.

    Returns
    -------
    COAProjection
    """

    coa_poly, method_projection = _get_sidd_type_projection(sidd)
    delta_arp, delta_varp = _get_sidd_adjustment_params(
        sidd, delta_arp, delta_varp, adj_params_frame)
    # the SIDD product grid requires no affine adjustment, so use the identity
    return cls(
        coa_poly, sidd.Measurement.ARPPoly, method_projection,
        row_shift=0, row_mult=1, col_shift=0, col_mult=1,
        delta_arp=delta_arp, delta_varp=delta_varp, range_bias=range_bias)
def _init_proj(
self,
im_points: numpy.ndarray) -> Tuple[
numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
"""
Parameters
----------
im_points : numpy.ndarray
Returns
-------
row_transform: numpy.ndarray
col_transform: numpy.ndarray
time_coa: numpy.ndarray
arp_coa: numpy.ndarray
varp_coa: numpy.ndarray
"""
row_transform = (im_points[:, 0] - self._row_shift)*self._row_mult
col_transform = (im_points[:, 1] - self._col_shift)*self._col_mult
time_coa = self._time_coa_poly(row_transform, col_transform)
# calculate aperture reference position and velocity at target time
arp_coa = self._arp_poly(time_coa)
varp_coa = self._varp_poly(time_coa)
return row_transform, col_transform, time_coa, arp_coa, varp_coa
def projection(
        self,
        im_points: numpy.ndarray) -> Tuple[
            numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """
    Perform the projection from image coordinates to R/Rdot coordinates.

    Parameters
    ----------
    im_points : numpy.ndarray
        Array of image point coordinates, **expected to have shape (N, 2)**.

    Returns
    -------
    r_tgt_coa: numpy.ndarray
        range to the ARP at COA
    r_dot_tgt_coa: numpy.ndarray
        range rate relative to the ARP at COA
    time_coa: numpy.ndarray
        center of aperture time since CDP start for input ip
    arp_coa: numpy.ndarray
        aperture reference position at time_coa
    varp_coa: numpy.ndarray
        velocity at time_coa
    """

    row_t, col_t, time_coa, arp_coa, varp_coa = self._init_proj(im_points)
    r_tgt_coa, r_dot_tgt_coa = self._method_proj(row_t, col_t, time_coa, arp_coa, varp_coa)
    # apply the adjustable offset parameters
    return (
        r_tgt_coa + self._range_bias,
        r_dot_tgt_coa,
        time_coa,
        arp_coa + self._delta_arp,
        varp_coa + self._delta_varp)
def _get_coa_projection(
        structure,
        use_structure_coa: bool,
        **coa_args) -> COAProjection:
    """
    Fetch the cached COAProjection from the structure, or construct a fresh one.

    Parameters
    ----------
    structure
        The SICD or SIDD metadata structure.
    use_structure_coa : bool
        If True and `structure.coa_projection` is populated, return it directly.
    coa_args
        Keyword arguments passed through to the COAProjection constructor methods.

    Returns
    -------
    COAProjection
    """

    from sarpy.io.complex.sicd_elements.SICD import SICDType
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1

    if use_structure_coa and structure.coa_projection is not None:
        return structure.coa_projection
    if isinstance(structure, SICDType):
        return COAProjection.from_sicd(structure, **coa_args)
    if isinstance(structure, (SIDDType2, SIDDType1)):
        return COAProjection.from_sidd(structure, **coa_args)
    raise ValueError(_unhandled_text.format(type(structure)))
###############
# General helper methods for extracting params from the sicd or sidd
def _get_reference_point(structure) -> numpy.ndarray:
    """
    Gets the reference point in ECF coordinates.

    Parameters
    ----------
    structure
        The SICD or SIDD metadata structure.

    Returns
    -------
    numpy.ndarray

    Raises
    ------
    ValueError
        For a SIDD whose projection type is not `PlaneProjection`.
    TypeError
        If `structure` is neither a SICD nor a SIDD type.
    """

    from sarpy.io.complex.sicd_elements.SICD import SICDType
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1

    if isinstance(structure, SICDType):
        # the scene center point
        return structure.GeoData.SCP.ECF.get_array(dtype='float64')
    if isinstance(structure, (SIDDType2, SIDDType1)):
        proj_type = structure.Measurement.ProjectionType
        if proj_type != 'PlaneProjection':
            raise ValueError(_unsupported_text.format(proj_type))
        return structure.Measurement.PlaneProjection.ReferencePoint.ECEF.get_array(dtype='float64')
    raise TypeError(_unhandled_text.format(type(structure)))
def _get_outward_norm(structure, gref: numpy.ndarray) -> numpy.ndarray:
    """
    Gets the default outward unit norm.

    Parameters
    ----------
    structure
        The SICD or SIDD metadata structure.
    gref : numpy.ndarray
        The ground reference point (ECF, m).

    Returns
    -------
    numpy.ndarray

    Raises
    ------
    ValueError
        For a SIDD whose projection type is not `PlaneProjection`.
    TypeError
        If `structure` is neither a SICD nor a SIDD type.
    """

    from sarpy.io.complex.sicd_elements.SICD import SICDType
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1

    if isinstance(structure, SICDType):
        # PFA image formation defines its own focus plane normal
        if structure.ImageFormation.ImageFormAlgo == 'PFA':
            return structure.PFA.FPN.get_array()
        return wgs_84_norm(gref)

    if isinstance(structure, (SIDDType2, SIDDType1)):
        proj_type = structure.Measurement.ProjectionType
        if proj_type != 'PlaneProjection':
            raise ValueError(_unsupported_text.format(proj_type))
        plane = structure.Measurement.PlaneProjection
        # plane normal from the product plane basis vectors
        row_vec = plane.ProductPlane.RowUnitVector.get_array(dtype='float64')
        col_vec = plane.ProductPlane.ColUnitVector.get_array(dtype='float64')
        norm_vec = numpy.cross(row_vec, col_vec)
        norm_vec /= numpy.linalg.norm(norm_vec)
        # orient away from the center of the earth
        if numpy.dot(norm_vec, gref) < 0:
            norm_vec *= -1
        return norm_vec

    raise TypeError(_unhandled_text.format(type(structure)))
def _extract_plane_params(structure) -> Tuple[
        numpy.ndarray, numpy.ndarray, float, float, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """
    Extract the parameters required for projection between the ground and the
    image plane, from either a SICD or a (plane projection) SIDD structure.

    Parameters
    ----------
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType

    Returns
    -------
    ref_point: numpy.ndarray
        Reference point for the image plane (ECF, m).
    ref_pixel: numpy.ndarray
        (row, col) pixel coordinates of the reference point.
    row_ss: float
        Row sample spacing.
    col_ss: float
        Column sample spacing.
    uRow: numpy.ndarray
        Unit vector in the increasing row direction (ECF).
    uCol: numpy.ndarray
        Unit vector in the increasing column direction (ECF).
    uGPN: numpy.ndarray
        Outward unit normal for the ground plane.
    uSPN: numpy.ndarray
        Outward unit normal for the slant plane. For a SIDD, this is taken to
        be identical to `uGPN`.

    Raises
    ------
    ValueError
        For a SIDD whose projection type is not `PlaneProjection`.
    TypeError
        If `structure` is neither a SICD nor a SIDD type.
    """

    from sarpy.io.complex.sicd_elements.SICD import SICDType
    from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
    from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1

    if isinstance(structure, SICDType):
        # reference point for the plane - the scene center point
        ref_point = structure.GeoData.SCP.ECF.get_array()
        ref_pixel = structure.ImageData.SCPPixel.get_array()
        # pixel spacing
        row_ss = structure.Grid.Row.SS
        col_ss = structure.Grid.Col.SS
        # image plane details
        uRow = structure.Grid.Row.UVectECF.get_array()  # unit normal in row direction
        uCol = structure.Grid.Col.UVectECF.get_array()  # unit normal in column direction
        # outward unit norm - PFA formation defines its own focus plane normal
        uGPN = structure.PFA.FPN.get_array() if structure.ImageFormation.ImageFormAlgo == 'PFA' \
            else wgs_84_norm(ref_point)
        # uSPN - defined in section 3.1 as normal to instantaneous slant plane that contains SCP at SCP COA is
        # tangent to R/Rdot contour at SCP. Points away from center of Earth. Use look to establish sign.
        ARP_SCP_COA = structure.SCPCOA.ARPPos.get_array()
        VARP_SCP_COA = structure.SCPCOA.ARPVel.get_array()
        uSPN = structure.SCPCOA.look*numpy.cross(VARP_SCP_COA, ref_point - ARP_SCP_COA)
        uSPN /= numpy.linalg.norm(uSPN)
        return ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, uGPN, uSPN
    elif isinstance(structure, (SIDDType1, SIDDType2)):
        proj_type = structure.Measurement.ProjectionType
        if proj_type != 'PlaneProjection':
            raise ValueError(_unsupported_text.format(proj_type))
        the_proj = structure.Measurement.PlaneProjection
        # reference point for the plane
        ref_point = the_proj.ReferencePoint.ECEF.get_array(dtype='float64')
        ref_pixel = the_proj.ReferencePoint.Point.get_array(dtype='float64')
        # pixel spacing
        row_ss = the_proj.SampleSpacing.Row
        col_ss = the_proj.SampleSpacing.Col
        # image plane details
        uRow = the_proj.ProductPlane.RowUnitVector.get_array(dtype='float64')
        uCol = the_proj.ProductPlane.ColUnitVector.get_array(dtype='float64')
        # outward unit norm for plane, oriented away from the center of the earth
        uGPN = numpy.cross(uRow, uCol)
        uGPN /= numpy.linalg.norm(uGPN)
        if numpy.dot(uGPN, ref_point) < 0:
            uGPN *= -1
        # slant plane is identical to outward unit norm
        return ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, uGPN, uGPN
    else:
        raise TypeError('Got structure unsupported type {}'.format(type(structure)))
#############
# Ground-to-Image (aka Scene-to-Image) projection.
def _validate_coords(coords: numpy.ndarray) -> Tuple[numpy.ndarray, Tuple[int, ...]]:
if not isinstance(coords, numpy.ndarray):
coords = numpy.array(coords, dtype='float64')
orig_shape = coords.shape
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (1, -1))
if coords.shape[-1] != 3:
raise ValueError(
'The coords array must represent an array of points in ECF coordinates, '
'so the final dimension of coords must have length 3. Have coords.shape = {}'.format(coords.shape))
return coords, orig_shape
def _ground_to_image(
        coords: numpy.ndarray,
        coa_proj: COAProjection,
        uGPN: numpy.ndarray,
        ref_point: numpy.ndarray,
        ref_pixel: numpy.ndarray,
        uIPN: numpy.ndarray,
        sf: float,
        row_ss: float,
        col_ss: float,
        uProj: numpy.ndarray,
        row_col_transform: numpy.ndarray,
        ipp_transform: numpy.ndarray,
        tolerance: float,
        max_iterations: int) -> Tuple[numpy.ndarray, numpy.ndarray, int]:
    """
    Basic level helper function performing the iterative scene-to-image
    projection for a single block of points.

    Parameters
    ----------
    coords : numpy.ndarray|tuple|list
        Scene points (ECF, m) of shape `(N, 3)`.
    coa_proj : COAProjection
    uGPN : numpy.ndarray
        Outward unit normal for the ground plane.
    ref_point : numpy.ndarray
        Reference point of the image plane (ECF, m).
    ref_pixel : numpy.ndarray
        (row, col) pixel coordinates of the reference point.
    uIPN : numpy.ndarray
        Unit normal for the image plane.
    sf : float
        Projection scale factor.
    row_ss : float
    col_ss : float
    uProj : numpy.ndarray
        Unit projection direction onto the image plane.
    row_col_transform : numpy.ndarray
        `(3, 2)` matrix whose columns are the row/col unit vectors.
    ipp_transform : numpy.ndarray
        `(2, 2)` transform accounting for possibly non-orthogonal row/col vectors.
    tolerance : float
        Ground-plane displacement convergence tolerance (m).
    max_iterations : int

    Returns
    -------
    image_points: numpy.ndarray
        The determined image point array, of size `N x 2`. Following SICD
        convention, the upper-left pixel is [0, 0].
    delta_gpn: numpy.ndarray
        Residual ground plane displacement (m).
    iterations: int
        The number of iterations performed.
    """

    g_n = coords.copy()  # current ground-plane estimate, refined each pass
    im_points = numpy.zeros((coords.shape[0], 2), dtype='float64')
    delta_gpn = numpy.zeros((coords.shape[0],), dtype='float64')
    cont = True
    iteration = 0

    matrix_transform = numpy.dot(row_col_transform, ipp_transform)
    # (3 x 2)*(2 x 2) = (3 x 2)
    while cont:
        # project ground plane to image plane iteration
        iteration += 1
        dist_n = numpy.dot(ref_point - g_n, uIPN)/sf  # (N, )
        i_n = g_n + numpy.outer(dist_n, uProj)  # (N, 3)
        delta_ipp = i_n - ref_point  # (N, 3)
        ip_iter = numpy.dot(delta_ipp, matrix_transform)  # (N, 2)
        im_points[:, 0] = ip_iter[:, 0]/row_ss + ref_pixel[0]
        im_points[:, 1] = ip_iter[:, 1]/col_ss + ref_pixel[1]
        # transform to ground plane containing the scene points and check how it compares
        p_n = _image_to_ground_plane(im_points, coa_proj, g_n, uGPN)
        # compute displacement between scene point and this new projected point
        diff_n = coords - p_n
        delta_gpn[:] = numpy.linalg.norm(diff_n, axis=1)
        # shift the ground-plane estimate by the residual and go again
        g_n += diff_n
        # should we continue iterating?
        cont = numpy.any(delta_gpn > tolerance) and (iteration < max_iterations)
    return im_points, delta_gpn, iteration
def ground_to_image(
        coords: Union[numpy.ndarray, list, tuple],
        structure,
        tolerance: float = 1e-2,
        max_iterations: int = 10,
        block_size: Optional[int] = 50000,
        use_structure_coa: bool = True,
        **coa_args) -> Tuple[numpy.ndarray, Union[numpy.ndarray, float], Union[numpy.ndarray, int]]:
    """
    Transforms a 3D ECF point to pixel (row/column) coordinates. This is
    implemented in accordance with the SICD Image Projections Description Document.
    **Really Scene-To-Image projection.**

    Parameters
    ----------
    coords : numpy.ndarray|tuple|list
        ECF coordinate to map to scene coordinates, of size `N x 3`.
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD data structure.
    tolerance : float|int
        Ground plane displacement tol (m).
    max_iterations : int
        maximum number of iterations to perform
    block_size : int|None
        size of blocks of coordinates to transform at a time
    use_structure_coa : bool
        If sicd.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    coa_args
        The keyword arguments from the COAProjection.from_sicd class method.

    Returns
    -------
    image_points: numpy.ndarray
        The determined image point array. Following the SICD convention,
        the upper-left pixel is [0, 0].
    delta_gpn: numpy.ndarray|float
        The residual ground plane displacement (m).
    iterations: numpy.ndarray|int
        The number of iterations performed.
    """

    coords, orig_shape = _validate_coords(coords)
    coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)
    ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, \
        uGPN, uSPN = _extract_plane_params(structure)

    uIPN = numpy.cross(uRow, uCol)  # NB: only outward pointing if Row/Col are right-handed system
    uIPN /= numpy.linalg.norm(uIPN)  # NB: uRow/uCol may not be perpendicular
    cos_theta = numpy.dot(uRow, uCol)
    sin_theta = numpy.sqrt(1 - cos_theta*cos_theta)
    # transform correcting for the possibly non-orthogonal row/col basis
    ipp_transform = numpy.array(
        [[1, -cos_theta], [-cos_theta, 1]], dtype='float64')/(sin_theta*sin_theta)
    row_col_transform = numpy.zeros((3, 2), dtype='float64')
    row_col_transform[:, 0] = uRow
    row_col_transform[:, 1] = uCol
    sf = float(numpy.dot(uSPN, uIPN))  # scale factor

    tolerance = float(tolerance)
    if tolerance < 1e-12:
        logger.warning(
            'minimum allowed tolerance is 1e-12 meters, resetting from {}'.format(tolerance))
        tolerance = 1e-12

    # prepare the work space
    coords_view = numpy.reshape(coords, (-1, 3))  # possibly or make 2-d flatten
    num_points = coords_view.shape[0]
    if block_size is None or num_points <= block_size:
        image_points, delta_gpn, iters = _ground_to_image(
            coords_view, coa_proj, uGPN,
            ref_point, ref_pixel, uIPN, sf, row_ss, col_ss, uSPN,
            row_col_transform, ipp_transform, tolerance, max_iterations)
        iters = numpy.full((num_points, ), iters)
    else:
        image_points = numpy.zeros((num_points, 2), dtype='float64')
        delta_gpn = numpy.zeros((num_points, ), dtype='float64')
        iters = numpy.zeros((num_points, ), dtype='int16')

        # proceed with block processing
        start_block = 0
        while start_block < num_points:
            end_block = min(start_block+block_size, num_points)
            image_points[start_block:end_block, :], delta_gpn[start_block:end_block], \
                iters[start_block:end_block] = _ground_to_image(
                    coords_view[start_block:end_block, :], coa_proj, uGPN,
                    ref_point, ref_pixel, uIPN, sf, row_ss, col_ss, uSPN,
                    row_col_transform, ipp_transform, tolerance, max_iterations)
            start_block = end_block

    # restore the shape of the input coordinates
    if len(orig_shape) == 1:
        image_points = numpy.reshape(image_points, (-1,))
        delta_gpn = float(delta_gpn[0])
        iters = int(iters[0])
    elif len(orig_shape) > 1:
        image_points = numpy.reshape(image_points, orig_shape[:-1]+(2, ))
        delta_gpn = numpy.reshape(delta_gpn, orig_shape[:-1])
        iters = numpy.reshape(iters, orig_shape[:-1])
    return image_points, delta_gpn, iters
def ground_to_image_geo(
        coords,
        structure,
        ordering='latlong',
        **kwargs) -> Tuple[numpy.ndarray, Union[numpy.ndarray, float], Union[numpy.ndarray, int]]:
    """
    Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates.
    This is implemented in accordance with the SICD Image Projections Description Document.

    Parameters
    ----------
    coords : numpy.ndarray|tuple|list
        Lat/Lon/HAE coordinate to map to scene coordinates, of size `N x 3`.
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    ordering : str
        If 'longlat', then the input is `[longitude, latitude, hae]`.
        Otherwise, the input is `[latitude, longitude, hae]`. Passed through
        to :func:`sarpy.geometry.geocoords.geodetic_to_ecf`.
    kwargs
        See the key word arguments of :func:`ground_to_image`

    Returns
    -------
    image_points: numpy.ndarray
        The determined image point array. Following the SICD convention,
        the upper-left pixel is [0, 0].
    delta_gpn: numpy.ndarray|float
        The residual ground plane displacement (m).
    iterations: numpy.ndarray|int
        The number of iterations performed.
    """

    # convert geodetic to ECF, then delegate to the ECF-based projection
    ecf_coords = geodetic_to_ecf(coords, ordering=ordering)
    return ground_to_image(ecf_coords, structure, **kwargs)
############
# Image-To-Ground projections
def _validate_im_points(
im_points: Union[numpy.ndarray, list, tuple]) -> Tuple[numpy.ndarray, Tuple[int, ...]]:
"""
Parameters
----------
im_points : numpy.ndarray|list|tuple
Returns
-------
im_points: numpy.ndarray
orig_shape: Tuple[int, ...]
"""
if im_points is None:
raise ValueError('The argument cannot be None')
if not isinstance(im_points, numpy.ndarray):
im_points = numpy.array(im_points, dtype='float64')
orig_shape = im_points.shape
if len(im_points.shape) == 1:
im_points = numpy.reshape(im_points, (1, -1))
if im_points.shape[-1] != 2:
raise ValueError(
'The im_points array must represent an array of points in pixel coordinates, '
'so the final dimension of im_points must have length 2. '
'Have im_points.shape = {}'.format(im_points.shape))
return im_points, orig_shape
def image_to_ground(
        im_points: Union[numpy.ndarray, list, tuple],
        structure,
        block_size: Optional[int] = 50000,
        projection_type: str = 'HAE',
        use_structure_coa: bool = True,
        **kwargs) -> numpy.ndarray:
    """
    Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
    described in SICD Image Projections document.

    Parameters
    ----------
    im_points : numpy.ndarray|list|tuple
        (row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
        Following SICD convention, the upper-left pixel is [0, 0].
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    block_size : None|int
        Size of blocks of coordinates to transform at a time. The entire array will be
        transformed as a single block if `None`.
    projection_type : str
        One of ['PLANE', 'HAE', 'DEM'].
    use_structure_coa : bool
        If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    kwargs
        keyword arguments relevant for the given projection type. See image_to_ground_plane/hae/dem methods.

    Returns
    -------
    numpy.ndarray
        Physical coordinates (in ECF) corresponding input image coordinates. The interpretation
        or meaning of the physical coordinates depends on `projection_type` chosen.

    Raises
    ------
    ValueError
        For an unrecognized `projection_type`.
    """

    p_type = projection_type.upper()
    # validate before resolving the projector, so a clear error comes out early
    if p_type not in ('PLANE', 'HAE', 'DEM'):
        raise ValueError('Got unrecognized projection type {}'.format(projection_type))

    if p_type == 'PLANE':
        projector = image_to_ground_plane
    elif p_type == 'HAE':
        projector = image_to_ground_hae
    else:
        projector = image_to_ground_dem
    return projector(
        im_points, structure, block_size=block_size,
        use_structure_coa=use_structure_coa, **kwargs)
def image_to_ground_geo(
        im_points: Union[numpy.ndarray, list, tuple],
        structure,
        ordering: str = 'latlong',
        block_size: Optional[int] = 50000,
        projection_type: str = 'HAE',
        use_structure_coa: bool = True,
        **kwargs) -> numpy.ndarray:
    """
    Transforms image coordinates to ground plane Lat/Lon/HAE coordinate via the algorithm(s)
    described in SICD Image Projections document.

    Parameters
    ----------
    im_points : numpy.ndarray|list|tuple
        (row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
        Following SICD convention, the upper-left pixel is [0, 0].
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    ordering : str
        Determines whether return is ordered as `[lat, long, hae]` or `[long, lat, hae]`.
        Passed through to :func:`sarpy.geometry.geocoords.ecf_to_geodetic`.
    block_size : None|int
        Size of blocks of coordinates to transform at a time. The entire array will be
        transformed as a single block if `None`.
    projection_type : str
        One of ['PLANE', 'HAE', 'DEM'].
    use_structure_coa : bool
        If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    kwargs
        See the keyword arguments in :func:`image_to_ground`.

    Returns
    -------
    numpy.ndarray
        Ground Plane Point (in Lat/Lon/HAE coordinates) along the R/Rdot contour.
    """

    # perform the ECF projection, then convert the result to geodetic
    ecf_points = image_to_ground(
        im_points, structure, block_size=block_size,
        projection_type=projection_type, use_structure_coa=use_structure_coa,
        **kwargs)
    return ecf_to_geodetic(ecf_points, ordering=ordering)
#####
# Image-to-Ground Plane
def _image_to_ground_plane_perform(
r_tgt_coa: numpy.ndarray,
r_dot_tgt_coa: numpy.ndarray,
arp_coa: numpy.ndarray,
varp_coa: numpy.ndarray,
gref: numpy.ndarray,
uZ: numpy.ndarray) -> numpy.ndarray:
"""
Parameters
----------
r_tgt_coa : numpy.ndarray
r_dot_tgt_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
gref : numpy.ndarray
uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# Solve for the intersection of a R/Rdot contour and a ground plane.
arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)
arpZ[arpZ > r_tgt_coa] = numpy.nan
# ARP ground plane nadir
aGPN = arp_coa - numpy.outer(arpZ, uZ)
# Compute ground plane distance (gd) from ARP nadir to circle of const range
gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)
# Compute sine and cosine of grazing angle
cosGraz = gd/r_tgt_coa
sinGraz = arpZ/r_tgt_coa
# Velocity components normal to ground plane and parallel to ground plane.
vMag = numpy.linalg.norm(varp_coa, axis=-1)
vZ = numpy.dot(varp_coa, uZ)
vX = numpy.sqrt(vMag*vMag - vZ*vZ) # Note: For Vx = 0, no Solution
# Orient X such that Vx > 0 and compute unit vectors uX and uY
uX = (varp_coa - numpy.outer(vZ, uZ))/vX[:, numpy.newaxis]
uY = numpy.cross(uZ, uX)
# Compute cosine of azimuth angle to ground plane point
cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)
cosAz[numpy.abs(cosAz) > 1] = numpy.nan # R/Rdot combination not possible in given plane
# Compute sine of azimuth angle. Use LOOK to establish sign.
look = numpy.sign(numpy.dot(numpy.cross(arp_coa-gref, varp_coa), uZ))
sinAz = look*numpy.sqrt(1-cosAz*cosAz)
# Compute Ground Plane Point in ground plane and along the R/Rdot contour
return aGPN + uX*(gd*cosAz)[:, numpy.newaxis] + uY*(gd*sinAz)[:, numpy.newaxis]
def _image_to_ground_plane(
        im_points: numpy.ndarray,
        coa_projection: COAProjection,
        gref: numpy.ndarray,
        uZ: numpy.ndarray) -> numpy.ndarray:
    """
    Project the given image points onto the specified ground plane.

    Parameters
    ----------
    im_points : numpy.ndarray
    coa_projection : COAProjection
    gref : numpy.ndarray
        Ground plane reference point (ECF, m).
    uZ : numpy.ndarray
        Unit normal for the ground plane.

    Returns
    -------
    numpy.ndarray
    """

    # R/Rdot contour parameters at the center of aperture for each point
    projected = coa_projection.projection(im_points)
    r_tgt_coa, r_dot_tgt_coa = projected[0], projected[1]
    arp_coa, varp_coa = projected[3], projected[4]
    # intersect the contour with the plane
    return _image_to_ground_plane_perform(
        r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ)
def image_to_ground_plane(
        im_points: Union[numpy.ndarray, list, tuple],
        structure,
        block_size: Optional[int] = 50000,
        gref: Union[None, numpy.ndarray, list, tuple] = None,
        ugpn: Union[None, numpy.ndarray, list, tuple] = None,
        use_structure_coa: bool = True,
        **coa_args):
    """
    Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
    described in SICD Image Projections document.

    Parameters
    ----------
    im_points : numpy.ndarray|list|tuple
        the image coordinate array
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    block_size : None|int
        Size of blocks of coordinates to transform at a time. The entire array will be
        transformed as a single block if `None`.
    gref : None|numpy.ndarray|list|tuple
        Ground plane reference point ECF coordinates (m). The default is the SCP or Reference Point.
    ugpn : None|numpy.ndarray|list|tuple
        Vector normal to the plane to which we are projecting. The default is
        determined from the structure.
    use_structure_coa : bool
        If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    coa_args
        keyword arguments for COAProjection.from_sicd class method.

    Returns
    -------
    numpy.ndarray
        Ground Plane Point (in ECF coordinates) corresponding to the input image coordinates.

    Raises
    ------
    ValueError
        If `gref` or `ugpn` does not have exactly three elements.
    """

    im_points, orig_shape = _validate_im_points(im_points)
    coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)

    # method parameter validation
    if gref is None:
        gref = _get_reference_point(structure)
    if not isinstance(gref, numpy.ndarray):
        gref = numpy.array(gref, dtype='float64')
    if gref.size != 3:
        raise ValueError('gref must have three elements.')
    if gref.ndim != 1:
        gref = numpy.reshape(gref, (3, ))

    if ugpn is None:
        ugpn = _get_outward_norm(structure, gref)
    if not isinstance(ugpn, numpy.ndarray):
        ugpn = numpy.array(ugpn, dtype='float64')
    if ugpn.size != 3:
        raise ValueError('ugpn must have three elements.')
    if ugpn.ndim != 1:
        ugpn = numpy.reshape(ugpn, (3, ))
    # normalize to a unit vector
    uZ = ugpn/numpy.linalg.norm(ugpn)

    # prepare workspace
    im_points_view = numpy.reshape(im_points, (-1, 2))  # possibly or make 2-d flatten
    num_points = im_points_view.shape[0]
    if block_size is None or num_points <= block_size:
        coords = _image_to_ground_plane(im_points_view, coa_proj, gref, uZ)
    else:
        coords = numpy.zeros((num_points, 3), dtype='float64')
        # proceed with block processing
        start_block = 0
        while start_block < num_points:
            end_block = min(start_block + block_size, num_points)
            coords[start_block:end_block, :] = _image_to_ground_plane(
                im_points_view[start_block:end_block], coa_proj, gref, uZ)
            start_block = end_block

    # restore the shape of the input points
    if len(orig_shape) == 1:
        coords = numpy.reshape(coords, (-1, ))
    elif len(orig_shape) > 1:
        coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
    return coords
#####
# Image-to-HAE
def _image_to_ground_hae_perform(
        r_tgt_coa: numpy.ndarray,
        r_dot_tgt_coa: numpy.ndarray,
        arp_coa: numpy.ndarray,
        varp_coa: numpy.ndarray,
        ref_point: numpy.ndarray,
        ugpn: numpy.ndarray,
        hae0: float,
        tolerance: float,
        max_iterations: int,
        ref_hae: float) -> numpy.ndarray:
    """
    Intermediate helper method: iteratively intersect the R/Rdot contour with
    the surface of constant height `hae0` above the WGS-84 ellipsoid.

    Parameters
    ----------
    r_tgt_coa : numpy.ndarray
    r_dot_tgt_coa : numpy.ndarray
    arp_coa : numpy.ndarray
    varp_coa : numpy.ndarray
    ref_point : numpy.ndarray
        The reference point (ECF, m).
    ugpn : numpy.ndarray
        Geodetic ground plane normal at the reference point.
    hae0 : float
        Target surface height (m) above the WGS-84 ellipsoid.
    tolerance : float
        Height convergence tolerance (m).
    max_iterations : int
    ref_hae : float
        HAE of the reference point (m).

    Returns
    -------
    numpy.ndarray
    """

    # Compute the geodetic ground plane normal at the ref_point.
    look = numpy.sign(numpy.sum(numpy.cross(arp_coa, varp_coa)*(ref_point - arp_coa), axis=1))
    # initial ground plane reference: shift the reference point down to height hae0
    gref = ref_point - (ref_hae - hae0)*ugpn
    # iteration variables
    gpp = None
    delta_hae = None
    cont = True
    iters = 0
    while cont:
        iters += 1
        # Compute the precise projection along the R/Rdot contour to Ground Plane.
        gpp = _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, ugpn)
        # check our hae value versus hae0
        gpp_llh = ecf_to_geodetic(gpp)
        delta_hae = gpp_llh[:, 2] - hae0
        max_abs_delta_hae = numpy.max(numpy.abs(delta_hae))
        # shift the plane by the height residual and iterate again
        gref = gpp - (delta_hae[:, numpy.newaxis] * ugpn)
        # should we stop our iteration?
        cont = (max_abs_delta_hae > tolerance) and (iters < max_iterations)

    # Compute the unit slant plane normal vector, uspn, that is tangent to the R/Rdot contour at point gpp
    uspn = numpy.cross(varp_coa, (gpp - arp_coa))*look[:, numpy.newaxis]
    uspn /= numpy.linalg.norm(uspn, axis=-1)[:, numpy.newaxis]
    # For the final straight line projection, project from point gpp along
    # the slant plane normal (as opposed to the ground plane normal that was
    # used in the iteration) to point slp.
    sf = numpy.sum(ugpn*uspn, axis=-1)
    slp = gpp - uspn*(delta_hae/sf)[:, numpy.newaxis]
    # Assign surface point SPP position by adjusting the HAE to be on the
    # HAE0 surface.
    spp_llh = ecf_to_geodetic(slp)
    spp_llh[:, 2] = hae0
    spp = geodetic_to_ecf(spp_llh)
    return spp
def _image_to_ground_hae(
        im_points: numpy.ndarray,
        coa_projection: COAProjection,
        hae0: float,
        tolerance: float,
        max_iterations: int,
        ref_hae: float,
        ref_point: numpy.ndarray) -> numpy.ndarray:
    """
    Intermediate helper function: project image points to the surface of
    constant height `hae0` above the WGS-84 ellipsoid.

    Parameters
    ----------
    im_points : numpy.ndarray
        the image coordinate array
    coa_projection : COAProjection
    hae0 : float
        Target surface height (m) above the WGS-84 ellipsoid.
    tolerance : float
    max_iterations : int
    ref_hae : float
    ref_point : numpy.ndarray

    Returns
    -------
    numpy.ndarray
    """

    # (image formation specific) R/Rdot projection parameters
    projected = coa_projection.projection(im_points)
    r_tgt_coa, r_dot_tgt_coa = projected[0], projected[1]
    arp_coa, varp_coa = projected[3], projected[4]
    # geodetic ground plane normal at the reference point
    ref_norm = wgs_84_norm(ref_point)
    return _image_to_ground_hae_perform(
        r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ref_norm,
        hae0, tolerance, max_iterations, ref_hae)
def image_to_ground_hae(
        im_points: Union[numpy.ndarray, list, tuple],
        structure,
        block_size: Optional[int] = 50000,
        hae0: Optional[float] = None,
        tolerance: float = 1e-3,
        max_iterations: int = 10,
        use_structure_coa: bool = True,
        **coa_args) -> numpy.ndarray:
    """
    Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
    described in SICD Image Projections document.

    Parameters
    ----------
    im_points : numpy.ndarray|list|tuple
        the image coordinate array
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    block_size : None|int
        Size of blocks of coordinates to transform at a time. The entire array will be
        transformed as a single block if `None`.
    hae0 : None|float|int
        Surface height (m) above the WGS-84 reference ellipsoid for projection point.
        Defaults to HAE at the SCP or Reference Point.
    tolerance : float|int
        Height threshold for convergence of iterative constant HAE computation (m).
    max_iterations : int
        Maximum number of iterations allowed for constant hae computation.
    use_structure_coa : bool
        If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    coa_args
        keyword arguments for COAProjection.from_sicd class method.

    Returns
    -------
    numpy.ndarray
        Ground Plane Point (in ECF coordinates) with target hae corresponding to
        the input image coordinates.
    """

    # validate the image coordinates and fetch/construct the COA projection
    im_points, orig_shape = _validate_im_points(im_points)
    projection_helper = _get_coa_projection(structure, use_structure_coa, **coa_args)

    # clamp the convergence tolerance from below
    tolerance = float(tolerance)
    if tolerance < 1e-12:
        logger.warning(
            'minimum allowed tolerance is 1e-12, resetting from {0:8f}'.format(tolerance))
        tolerance = 1e-12

    # clamp the iteration count into [1, 100]
    max_iterations = int(max_iterations)
    if max_iterations < 1:
        logger.error(
            'max_iterations must be a positive integer, resetting to 1 from {}'.format(max_iterations))
        max_iterations = 1
    if max_iterations > 100:
        logger.error(
            'maximum allowed max_iterations is 100, resetting from {}'.format(max_iterations))
        max_iterations = 100

    # default hae0 to the height of the reference point
    ref_point = _get_reference_point(structure)
    ref_llh = ecf_to_geodetic(ref_point)
    ref_hae = float(ref_llh[2])
    if hae0 is None:
        hae0 = ref_hae

    # flatten to a two-dimensional view for processing
    flat_points = numpy.reshape(im_points, (-1, 2))
    point_count = flat_points.shape[0]
    if block_size is None or point_count <= block_size:
        coords = _image_to_ground_hae(
            flat_points, projection_helper, hae0, tolerance, max_iterations, ref_hae, ref_point)
    else:
        coords = numpy.zeros((point_count, 3), dtype='float64')
        # process in contiguous blocks of at most block_size points
        for first in range(0, point_count, block_size):
            last = min(first + block_size, point_count)
            coords[first:last, :] = _image_to_ground_hae(
                flat_points[first:last], projection_helper, hae0, tolerance,
                max_iterations, ref_hae, ref_point)

    # restore the shape of the input coordinate collection
    if len(orig_shape) == 1:
        coords = numpy.reshape(coords, (-1,))
    elif len(orig_shape) > 1:
        coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
    return coords
#####
# Image-to-DEM
def _do_dem_iteration(
        previous_ecf: numpy.ndarray,
        previous_diff: numpy.ndarray,
        this_ecf: numpy.ndarray,
        this_diff: numpy.ndarray) -> Optional[Tuple[numpy.ndarray, numpy.ndarray]]:
    """
    Identify points whose height-above-surface difference crossed below zero
    between two consecutive projections, and linearly interpolate the crossing
    location in ECF space. Returns `None` when no point crossed, otherwise
    the boolean mask of crossing points and their interpolated positions.
    """

    crossed = numpy.isfinite(this_diff) & (this_diff < 0)
    if not numpy.any(crossed):
        return None
    # linear interpolation weights - the (positive) difference from the
    # previous step and the magnitude of the (negative) current difference
    above = previous_diff[crossed]
    below = numpy.abs(this_diff[crossed])
    interpolated = (below[:, numpy.newaxis]*previous_ecf[crossed] +
                    above[:, numpy.newaxis]*this_ecf[crossed])/((above + below)[:, numpy.newaxis])
    return crossed, interpolated
def _image_to_ground_dem(
        im_points: numpy.ndarray,
        coa_projection: COAProjection,
        dem_interpolator: DEMInterpolator,
        min_dem: float,
        max_dem: float,
        vertical_step_size: Union[float, int],
        ref_hae: float,
        ref_point: numpy.ndarray) -> numpy.ndarray:
    """
    Intermediate helper for DEM projection. Steps a sequence of constant HAE
    projections downwards from `max_dem` in increments of `vertical_step_size`,
    and linearly interpolates the location where each point's R/Rdot contour
    first crosses below the DEM surface.

    Parameters
    ----------
    im_points : numpy.ndarray
        the image coordinate array
    coa_projection : COAProjection
    dem_interpolator : DEMInterpolator
    min_dem : float
        lower bound (m) for DEM heights in the region of interest
    max_dem : float
        upper bound (m) for DEM heights in the region of interest
    vertical_step_size : float|int
        HAE step (m) between successive constant height projections
    ref_hae: float
    ref_point : numpy.ndarray

    Returns
    -------
    numpy.ndarray
    """

    # get (image formation specific) projection parameters
    r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_projection.projection(im_points)
    ugpn = wgs_84_norm(ref_point)
    tolerance = 1e-3
    max_iterations = 10
    # if max_dem - min_dem is sufficiently small, then pretend it's flat
    if max_dem - min_dem < vertical_step_size:
        return _image_to_ground_hae_perform(
            r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn, max_dem,
            tolerance, max_iterations, ref_hae)
    # set up workspace
    out = numpy.zeros((im_points.shape[0], 3), dtype='float64')
    # cont_mask tracks which points have not yet been resolved to the surface
    cont_mask = numpy.ones((im_points.shape[0], ), dtype='bool')
    cont = True
    this_hae = max_dem
    # project at the top of the height window and record how far above the
    # DEM surface each projected point sits
    previous_coords = _image_to_ground_hae_perform(
        r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn, max_dem,
        tolerance, max_iterations, ref_hae)
    previous_llh = ecf_to_geodetic(previous_coords)
    previous_diff = previous_llh[:, 2] - dem_interpolator.get_elevation_hae(
        previous_llh[:, 0], previous_llh[:, 1])
    # NOTE(review): there is no explicit lower bound on this_hae in this loop;
    # termination relies on every contour eventually crossing below the DEM
    # surface - confirm against the min_dem/max_dem padding in the caller
    while cont:
        # step down one HAE level and re-project only the unresolved points
        this_hae -= vertical_step_size
        this_coords = _image_to_ground_hae_perform(
            r_tgt_coa[cont_mask], r_dot_tgt_coa[cont_mask], arp_coa[cont_mask], varp_coa[cont_mask],
            ref_point, ugpn, this_hae, tolerance, max_iterations, ref_hae)
        this_llh = ecf_to_geodetic(this_coords)
        this_diff = this_llh[:, 2] - dem_interpolator.get_elevation_hae(this_llh[:, 0], this_llh[:, 1])
        # find the points which crossed below the surface on this step
        result = _do_dem_iteration(previous_coords, previous_diff, this_coords, this_diff)
        if result is not None:
            this_mask, this_result = result
            # map this_mask (relative to the unresolved subset) back to the
            # full-size index space before committing the results
            temp_mask = numpy.zeros((im_points.shape[0], ), dtype='bool')
            temp_mask[cont_mask] = this_mask
            out[temp_mask, :] = this_result
            cont_mask[temp_mask] = False
            cont = numpy.any(cont_mask)
            if cont:
                # retain only the still-unresolved entries for the next step
                previous_coords = this_coords[~this_mask, :]
                previous_diff = this_diff[~this_mask]
        else:
            previous_coords = this_coords
            previous_diff = this_diff
    return out
def _image_to_ground_dem_block(
        im_points: numpy.ndarray,
        coa_projection: COAProjection,
        dem_interpolator: DEMInterpolator,
        horizontal_step: float,
        lat_lon_box: numpy.ndarray,
        block_size: Optional[int],
        lat_pad: float,
        lon_pad: float) -> numpy.ndarray:
    """
    Project a collection of image points onto the DEM over a single lat/lon
    sub-box, processing at most `block_size` points at a time.

    Parameters
    ----------
    im_points : numpy.ndarray
    coa_projection : COAProjection
    dem_interpolator : DEMInterpolator
    horizontal_step : float
        NOTE(review): despite the name, this value is forwarded as the
        vertical (HAE) step size of `_image_to_ground_dem`, and the caller
        passes its `vertical_step_size` here - confirm the intended naming.
    lat_lon_box : numpy.ndarray
        of the form `[lat min, lat max, lon min, lon max]`
    block_size : int|None
    lat_pad : float
        latitude padding (degrees) for the DEM min/max height search
    lon_pad : float
        longitude padding (degrees) for the DEM min/max height search

    Returns
    -------
    numpy.ndarray
    """

    # determine reference point - the center of the lat/lon box, on the DEM
    ref_lat = 0.5*(lat_lon_box[0] + lat_lon_box[1])
    ref_lon = 0.5*(lat_lon_box[2] + lat_lon_box[3])
    ref_hae = float(dem_interpolator.get_elevation_hae(ref_lat, ref_lon))
    ref_ecf = geodetic_to_ecf([ref_lat, ref_lon, ref_hae])
    # determine max/min hae in the DEM region, padded and clamped to valid
    # lat/lon ranges, with an extra 10 m of margin on either side
    padded_box = numpy.array([
        max(-90, lat_lon_box[0] - 0.5*lat_pad), min(lat_lon_box[1] + 0.5*lat_pad, 90),
        max(-180, lat_lon_box[2] - 0.5*lon_pad), min(lat_lon_box[3] + 0.5*lon_pad, 180)], dtype='float64')
    min_dem = dem_interpolator.get_min_hae(padded_box) - 10
    max_dem = dem_interpolator.get_max_hae(padded_box) + 10
    # prepare workspace
    num_points = im_points.shape[0]
    if block_size is None or num_points <= block_size:
        coords = _image_to_ground_dem(
            im_points, coa_projection, dem_interpolator, min_dem, max_dem,
            horizontal_step, ref_hae, ref_ecf)
    else:
        coords = numpy.zeros((num_points, 3), dtype='float64')
        # proceed with block processing
        start_block = 0
        while start_block < num_points:
            end_block = min(start_block + block_size, num_points)
            coords[start_block:end_block, :] = _image_to_ground_dem(
                im_points[start_block:end_block, :], coa_projection, dem_interpolator,
                min_dem, max_dem, horizontal_step, ref_hae, ref_ecf)
            start_block = end_block
    return coords
def image_to_ground_dem(
        im_points: Union[numpy.ndarray, list, tuple],
        structure,
        block_size: Optional[int] = 50000,
        dem_interpolator: Union[str, DEMInterpolator] = None,
        dem_type: Union[None, str, List[str]] = None,
        geoid_file: Union[None, str, GeoidHeight] = None,
        pad_value: float = 0.2,
        vertical_step_size: Union[int, float] = 10,
        use_structure_coa: bool = True,
        **coa_args) -> numpy.ndarray:
    """
    Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
    described in SICD Image Projections document.

    Parameters
    ----------
    im_points : numpy.ndarray|list|tuple
        the image coordinate array
    structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
        The SICD or SIDD structure.
    block_size : None|int
        Size of blocks of coordinates to transform at a time. The entire array
        will be transformed as a single block if `None`.
    dem_interpolator : str|DEMInterpolator
        The DEMInterpolator. If this is a string, then a DTEDInterpolator will be
        constructed assuming that this is the DTED root search directory.
    dem_type : None|str|List[str]
        The DEM type or list of DEM types in order of priority. Only used if
        `dem_interpolator` is the search path.
    geoid_file : None|str|GeoidHeight
        The `GeoidHeight` object, an egm file name, or root directory containing
        one of the egm files in the subdirectory "geoid". If `None`, then default
        to the root directory of `dted_list`. Only used if `dem_interpolator` is
        the search path.
    pad_value : float
        The degree value to pad by for the dem interpolator. Only used if
        `dem_interpolator` is the search path.
    vertical_step_size : float|int
        Sampling along HAE altitude at the given resolution in meters. Bounds of
        `[0.1, 100]` will be enforced by replacement.
    use_structure_coa : bool
        If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
    coa_args
        keyword arguments for COAProjection.from_sicd class method.

    Returns
    -------
    numpy.ndarray
        Physical coordinates (in ECF coordinates) with corresponding to the input image
        coordinates, assuming detected features actually correspond to the DEM.
    """

    def append_grid_elements(this_lon_min, this_lon_max, the_list):
        # partition the region [lat_min, lat_max] x [this_lon_min, this_lon_max]
        # into sub-boxes no larger than lat_grid_size x lon_grid_size, keeping
        # degenerate (zero width/height) intervals as single degenerate boxes
        assert this_lon_min <= this_lon_max
        if this_lon_min == this_lon_max:
            if lat_min == lat_max:
                the_list.append((lat_min, lat_max, this_lon_min, this_lon_max))
            else:
                lat_start = lat_min
                while lat_start < lat_max:
                    lat_end = min(lat_start + lat_grid_size, lat_max)
                    the_list.append((lat_start, lat_end, this_lon_min, this_lon_max))
                    lat_start = lat_end
        else:
            if lat_min == lat_max:
                lon_start = this_lon_min
                while lon_start < this_lon_max:
                    lon_end = min(lon_start + lon_grid_size, this_lon_max)
                    the_list.append((lat_min, lat_max, lon_start, lon_end))
                    lon_start = lon_end
            else:
                lat_start = lat_min
                while lat_start < lat_max:
                    lon_start = this_lon_min
                    lat_end = min(lat_start + lat_grid_size, lat_max)
                    while lon_start < this_lon_max:
                        lon_end = min(lon_start + lon_grid_size, this_lon_max)
                        the_list.append((lat_start, lat_end, lon_start, lon_end))
                        lon_start = lon_end
                    lat_start = lat_end

    # coa projection creation
    im_points, orig_shape = _validate_im_points(im_points)
    coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)

    # clamp the vertical sampling step into [0.1, 100] meters
    vertical_step_size = float(vertical_step_size)
    if vertical_step_size < 0.1:
        vertical_step_size = 0.1
    if vertical_step_size > 100:
        vertical_step_size = 100

    # reference point extraction
    ref_ecf = _get_reference_point(structure)
    ref_llh = ecf_to_geodetic(ref_ecf)
    ref_hae = ref_llh[2]

    # subgrid size definition
    lat_grid_size = 0.03
    # NOTE(review): this scales by sin(latitude); scaling for equal ground
    # distance per degree of longitude would conventionally use cos(latitude).
    # This only affects lookup grid sizing/padding, not correctness - confirm.
    lon_grid_size = min(10, lat_grid_size/abs(numpy.sin(numpy.deg2rad(ref_llh[0]))))

    # validate the dem_interpolator
    if dem_interpolator is None:
        raise ValueError('dem_interpolator is None, this is unhandled.')
    if isinstance(dem_interpolator, str):
        # interpret the string as a DTED root search directory
        dted_list = DTEDList(dem_interpolator)
        dem_interpolator = DTEDInterpolator.from_reference_point(
            ref_llh, dted_list, dem_type=dem_type, geoid_file=geoid_file, pad_value=pad_value)
    if not isinstance(dem_interpolator, DEMInterpolator):
        raise TypeError('dem_interpolator is of unsupported type {}'.format(type(dem_interpolator)))

    # perform a projection to reference point hae for approximate lat/lon values
    im_points_view = numpy.reshape(im_points, (-1, 2))  # possibly or make 2-d flatten
    r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_proj.projection(im_points_view)
    ugpn = wgs_84_norm(ref_ecf)
    tolerance = 1e-3
    max_iterations = 10
    llh_rough = ecf_to_geodetic(_image_to_ground_hae_perform(
        r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_ecf, ugpn, ref_hae,
        tolerance, max_iterations, ref_hae))

    # segment into lat/lon grid of small size for more efficient dem lookup
    lat_min = numpy.min(llh_rough[:, 0])
    lat_max = numpy.max(llh_rough[:, 0])
    lon_min = numpy.min(llh_rough[:, 1])
    lon_max = numpy.max(llh_rough[:, 1])
    lat_lon_grids = []
    # NOTE(review): heuristic detection of a -180/180 crossing - confirm for
    # scenes legitimately spanning more than 180 degrees of longitude
    if (lon_min < -90) and (lon_max > 90):
        # there is a -180/180 crossing
        append_grid_elements(numpy.min(llh_rough[(llh_rough[:, 1] > 0), 1]), 180, lat_lon_grids)
        append_grid_elements(-180, numpy.max(llh_rough[(llh_rough[:, 1] < 0), 1]), lat_lon_grids)
    else:
        append_grid_elements(lon_min, lon_max, lat_lon_grids)

    if len(lat_lon_grids) == 1:
        # use the flattened (N, 2) view here, consistent with the multi-grid
        # branch below - previously the un-flattened `im_points` was passed,
        # which produced wrong results for input of more than two dimensions
        coords = _image_to_ground_dem_block(
            im_points_view, coa_proj, dem_interpolator, vertical_step_size,
            lat_lon_grids[0], block_size, lat_grid_size, lon_grid_size)
    else:
        num_points = im_points_view.shape[0]
        coords = numpy.zeros((num_points, 3), dtype='float64')
        for entry in lat_lon_grids:
            # process the points whose rough projection falls in this sub-box
            mask = ((llh_rough[:, 0] >= entry[0]) & (llh_rough[:, 0] <= entry[1]) &
                    (llh_rough[:, 1] >= entry[2]) & (llh_rough[:, 1] <= entry[3]))
            if numpy.any(mask):
                coords[mask, :] = _image_to_ground_dem_block(
                    im_points_view[mask, :], coa_proj, dem_interpolator, vertical_step_size,
                    entry, block_size, lat_grid_size, lon_grid_size)

    # restore the shape of the input coordinate collection
    if len(orig_shape) == 1:
        coords = numpy.reshape(coords, (-1,))
    elif len(orig_shape) > 1:
        coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
    return coords
| 75,489 | 37.574348 | 152 | py |
sarpy | sarpy-master/sarpy/geometry/geometry_elements.py | """
This module provides basic geometry elements generally geared towards (geo)json usage.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import copy
from collections import OrderedDict
from uuid import uuid4
from typing import Union, List, Tuple, Dict, Callable, Any
import json
import logging
import numpy
logger = logging.getLogger(__name__)
_poorly_formed_text = 'Poorly formed json {}'
_disallowed_text = 'Got disallowed type {}'
##########
# utility functions
def _compress_identical(coords):
    """
    Eliminate consecutive points with same first two coordinates.

    Parameters
    ----------
    coords : numpy.ndarray

    Returns
    -------
    numpy.ndarray
        coords array with consecutive identical points supressed (last point retained)
    """

    if coords.shape[0] < 2:
        return coords
    # a point is kept when it differs from its successor in either of the
    # first two coordinates; the final point is always retained
    differs = numpy.logical_or(
        coords[:-1, 0] != coords[1:, 0],
        coords[:-1, 1] != coords[1:, 1])
    keep = numpy.concatenate((differs, [True, ]))
    return coords[keep, :]
def _validate_contain_arguments(pts_x, pts_y):
    # helper method for Polygon functionality - coerce both coordinate
    # collections to numpy arrays and verify that their shapes agree
    coerced = []
    for pts in (pts_x, pts_y):
        if isinstance(pts, numpy.ndarray):
            coerced.append(pts)
        else:
            coerced.append(numpy.array(pts, dtype=numpy.float64))
    pts_x, pts_y = coerced
    if pts_x.shape != pts_y.shape:
        raise ValueError(
            'pts_x and pts_y must be the same shape. Got {} and {}'.format(pts_x.shape, pts_y.shape))
    return pts_x, pts_y
def _validate_grid_contain_arguments(grid_x, grid_y):
    # helper method for Polygon functionality - coerce the grids to numpy
    # arrays and verify each is one-dimensional and strictly increasing
    if not isinstance(grid_x, numpy.ndarray):
        grid_x = numpy.array(grid_x, dtype=numpy.float64)
    if not isinstance(grid_y, numpy.ndarray):
        grid_y = numpy.array(grid_y, dtype=numpy.float64)
    if grid_x.ndim != 1 or grid_y.ndim != 1:
        raise ValueError('grid_x and grid_y must be one dimensional.')
    if numpy.any(numpy.diff(grid_x) <= 0):
        raise ValueError('grid_x must be monotonically increasing')
    if numpy.any(numpy.diff(grid_y) <= 0):
        raise ValueError('grid_y must be monotonically increasing')
    return grid_x, grid_y
def _get_kml_coordinate_string(coordinates, transform):
    # type: (numpy.ndarray, Union[None, Callable]) -> str
    # render coordinates as the kml "lon,lat lon,lat ..." string form, using
    # only the first two entries of each coordinate
    transformed = coordinates if transform is None else transform(coordinates)
    if coordinates.ndim == 1:
        return '{0:0.9f},{1:0.9f}'.format(*transformed[:2])
    return ' '.join(
        '{0:0.9f},{1:0.9f}'.format(*row[:2]) for row in transformed)
def _line_segments_intersect(pt0, pt1, pt2, pt3):
    """
    Does line segment defined by points 0 & 1 internally intersect with line
    segment defined by points 2 & 3? For these purposes, co-linearity will be
    considered False.

    Parameters
    ----------
    pt0 : numpy.ndarray|list|tuple
    pt1 : numpy.ndarray|list|tuple
    pt2 : numpy.ndarray|list|tuple
    pt3 : numpy.ndarray|list|tuple

    Returns
    -------
    bool
    """

    def as_vector(entry):
        # type: (Any) -> numpy.ndarray
        if not isinstance(entry, numpy.ndarray):
            entry = numpy.array(entry)
        if entry.ndim != 1 or entry.size != 2:
            raise ValueError('all inputs must be numpy array of shape (2, )')
        return entry

    seg0_start = as_vector(pt0)  # one end of the first segment
    seg0_dir = as_vector(pt1) - seg0_start  # direction along the first segment
    seg1_start = as_vector(pt2)  # one end of the second segment
    seg1_dir = as_vector(pt3) - seg1_start  # direction along the second segment
    if numpy.linalg.norm(seg0_dir) == 0 or numpy.linalg.norm(seg1_dir) == 0:
        # a degenerate (zero-length) segment cannot legitimately intersect
        return False
    denominator = float(numpy.cross(seg0_dir, seg1_dir))
    if denominator == 0:
        # parallel direction vectors - treated uniformly as non-intersecting
        return False
    # solve seg0_start + t*seg0_dir == seg1_start + u*seg1_dir
    offset = seg1_start - seg0_start
    t = float(numpy.cross(offset, seg1_dir))/denominator
    u = float(numpy.cross(offset, seg0_dir))/denominator
    return (0 <= t <= 1 and 0 < u < 1) or (0 < t < 1 and 0 <= u <= 1)
def _validate_point_array(point):
    """
    Extract array from point, or verify the input is consistent with point
    definition.

    Parameters
    ----------
    point : Point|numpy.ndarray|Tuple|List

    Returns
    -------
    numpy.ndarray
        A numpy.ndarray of shape `(N, )` with `N >= 2`.
    """

    if isinstance(point, Point):
        # a Point geometry carries its coordinates directly
        return point.coordinates
    array_form = point if isinstance(point, numpy.ndarray) else numpy.array(point, dtype='float64')
    if array_form.ndim != 1 or array_form.size < 2:
        raise ValueError('point input must yield a one-dimensional array of at least two elements.')
    return array_form
def _line_segment_distance(line_coord, coord):
    """
    Get the (2-d) distance from the point given by coord from the line segment defined by line_coord.

    Parameters
    ----------
    line_coord : numpy.ndarray
        This is implicitly assumed to be shape (2, 2).
    coord : numpy.ndarray
        This is implicitly assumed to be shape (2,).

    Returns
    -------
    float
    """

    # unit direction along the segment, and the in-plane normal to it
    direction = line_coord[1, :] - line_coord[0, :]
    direction /= numpy.linalg.norm(direction)
    normal = numpy.array([direction[1], -direction[0]])
    from_start = coord - line_coord[0, :]  # vector from first end to point
    from_end = coord - line_coord[1, :]  # vector from last end to point
    if numpy.sign(from_end.dot(direction)) == numpy.sign(from_start.dot(direction)):
        # the perpendicular foot falls outside the segment, so the closest
        # endpoint determines the distance
        return min(float(numpy.linalg.norm(from_start)), float(numpy.linalg.norm(from_end)))
    # the point lies "between" the two endpoints - perpendicular distance
    return float(numpy.abs(from_end.dot(normal)))
###############
# Geojson base object
class Jsonable(object):
    """
    Abstract base class establishing the json serialization contract.
    """

    _type = 'Jsonable'

    @property
    def type(self):
        """
        str: The type identifier.
        """
        return self._type

    @classmethod
    def from_dict(cls, the_json):
        """
        Deserialize from a json-compatible dictionary. Abstract here -
        implemented by each concrete subclass.

        Parameters
        ----------
        the_json : Dict

        Returns
        -------
        Jsonable
        """
        raise NotImplementedError

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json-compatible dictionary. Abstract here - implemented
        by each concrete subclass.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """
        raise NotImplementedError

    def __str__(self):
        return '{}(**{})'.format(self.__class__.__name__, json.dumps(self.to_dict(), indent=1))

    def __repr__(self):
        return '{}(**{})'.format(self.__class__.__name__, self.to_dict())

    def copy(self):
        """
        Make a deep copy of the item, via a serialization round trip.

        Returns
        -------
        Jsonable
        """
        return self.__class__.from_dict(self.to_dict())

    def replicate(self):
        """
        Make a replica of the item, where any uid entry has been dropped so
        that a fresh one is assigned on deserialization.

        Returns
        -------
        Jsonable
        """
        serialized = self.to_dict().copy()
        serialized.pop('uid', None)
        return self.__class__.from_dict(serialized)
#######
# Geojson object definitions
class Feature(Jsonable):
    """
    Generic feature class - basic geojson functionality. Should generally be extended
    to coherently handle properties for specific use case.
    """

    __slots__ = ('_uid', '_geometry', '_properties')
    _type = 'Feature'

    def __init__(self, uid=None, geometry=None, properties=None):
        """
        Parameters
        ----------
        uid : None|str
            The unique identifier. If not supplied, this falls back to
            `properties['identifier']` when present, and otherwise a uuid4
            string is generated.
        geometry : None|Geometry|dict
            The geometry, or its json serialization.
        properties : None|int|float|str|list|dict|Jsonable
        """
        self._geometry = None
        self._properties = None
        self.geometry = geometry
        self.properties = properties
        # fall back to an identifier embedded in the properties, if any
        if uid is None and isinstance(properties, dict):
            uid = properties.get('identifier', None)
        if uid is None:
            self._uid = str(uuid4())
        elif not isinstance(uid, str):
            raise TypeError('uid must be a string.')
        else:
            self._uid = uid

    @property
    def uid(self):
        """
        The feature unique identifier.

        Returns
        -------
        str
        """
        return self._uid

    @property
    def geometry(self):
        """
        The geometry object.

        Returns
        -------
        GeometryObject|GeometryCollection
        """
        return self._geometry

    @geometry.setter
    def geometry(self, geometry):
        if geometry is None:
            self._geometry = None
        elif isinstance(geometry, Geometry):
            self._geometry = geometry
        elif isinstance(geometry, dict):
            # deserialize from the geojson dictionary form
            self._geometry = Geometry.from_dict(geometry)
        else:
            raise TypeError('geometry must be an instance of Geometry base class')

    @property
    def properties(self):  # type: () -> Union[None, int, float, str, list, dict, Jsonable]
        """
        The properties.

        Returns
        -------
        None|int|float|str|dict|list|Jsonable: The properties.
        """
        return self._properties

    @properties.setter
    def properties(self, properties):
        # None is a legitimate value (the default, and explicitly handled in
        # to_dict), so only warn for populated values of a type which may not
        # serialize - previously None itself triggered a spurious warning on
        # every default construction
        if properties is not None and not isinstance(properties, (int, float, str, dict, list, Jsonable)):
            logger.warning(
                'Got unexpected type `{}` for properties.\n\t'
                'This may effect serialization ability'.format(type(properties)))
        self._properties = properties

    @classmethod
    def from_dict(cls, the_json):
        """
        Deserialize from a geojson feature dictionary.

        Parameters
        ----------
        the_json : Dict

        Returns
        -------
        Feature
        """
        typ = the_json['type']
        if typ != cls._type:
            raise ValueError('Feature cannot be constructed from {}'.format(the_json))
        # accept either the geojson standard 'id' entry or the 'uid' entry
        the_id = the_json.get('id', None)
        if the_id is None:
            the_id = the_json.get('uid', None)
        return cls(uid=the_id,
                   geometry=the_json.get('geometry', None),
                   properties=the_json.get('properties', None))

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json-compatible dictionary.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        parent_dict['id'] = self.uid
        if self.geometry is not None:
            parent_dict['geometry'] = self.geometry.to_dict()
        if self.properties is not None:
            if isinstance(self.properties, (int, float, str, list, dict)):
                parent_dict['properties'] = self.properties
            elif isinstance(self.properties, Jsonable):
                parent_dict['properties'] = self.properties.to_dict()
            else:
                logger.warning(
                    'Got unexpected Feature properties type `{}`,'
                    '\n\tnot serializing'.format(type(self.properties)))
        return parent_dict

    def add_to_kml(self, doc, coord_transform, parent=None):
        """
        Add this feature to the kml document. **Note that coordinates or transformed
        coordinates are assumed to be WGS-84 coordinates in longitude, latitude order.**
        Currently only the first two (i.e. longitude and latitude) are used in
        this export.

        Parameters
        ----------
        doc : sarpy.io.kml.Document
        coord_transform : None|callable
            If callable, the the transform will be applied to the coordinates before
            adding to the document.
        parent : None|minidom.Element
            The parent node.

        Returns
        -------
        None
        """
        params = {}
        if self.uid is not None:
            params['id'] = self.uid
        if self.properties is not None:
            params['description'] = str(self.properties)
        placemark = doc.add_container(par=parent, typ='Placemark', **params)
        if self.geometry is not None:
            self.geometry.add_to_kml(doc, placemark, coord_transform)

    def replicate(self):
        """
        Make a replica of the feature, with a newly generated uid.

        Returns
        -------
        Feature
        """
        geometry = None if self.geometry is None else self.geometry.replicate()
        old_properties = self.properties
        if old_properties is None:
            new_properties = None
        elif isinstance(old_properties, Jsonable):
            new_properties = old_properties.replicate()
        else:
            new_properties = copy.deepcopy(old_properties)
        the_type = self.__class__
        return the_type(geometry=geometry, properties=new_properties)
class FeatureCollection(Jsonable):
    """
    Generic FeatureCollection class - basic geojson functionality. Should generally be
    extended to coherently handle specific Feature extension.
    """

    __slots__ = ('_features', '_feature_dict')
    _type = 'FeatureCollection'

    def __init__(self, features=None):
        """
        Parameters
        ----------
        features : None|List[Feature]
        """
        self._features = None
        self._feature_dict = None  # maps feature uid -> integer index in _features
        if features is not None:
            self.features = features

    def __len__(self):
        if self._features is None:
            return 0
        return len(self._features)

    def __getitem__(self, item):
        # type: (Any) -> Union[Feature, List[Feature]]
        # lookup by uid string, or by integer index / slice
        # NOTE(review): raising StopIteration (rather than IndexError/KeyError)
        # for an empty collection looks like legacy usage - confirm before changing
        if self._features is None:
            raise StopIteration
        if isinstance(item, str):
            index = self._feature_dict[item]
            return self._features[index]
        return self._features[item]

    def __delitem__(self, item):
        # type: (Any) -> None
        if self._features is None:
            return
        if isinstance(item, Feature):
            # delete by the feature's uid - previously this assigned the
            # class-level property object (`Feature.uid`) instead of the
            # instance value, so deletion by Feature instance always raised
            item = item.uid
        if not isinstance(item, (str, int)):
            raise ValueError('Unexpected type `{}`'.format(type(item)))
        if isinstance(item, str):
            index = self._feature_dict[item]
            del self._features[index]
        else:
            del self._features[item]
        # indices after the removed entry have shifted, so rebuild the lookup
        self._rebuild_feature_dict()

    @property
    def features(self):
        """
        The features list.

        Returns
        -------
        List[Feature]
        """
        return self._features

    @features.setter
    def features(self, features):
        if features is None:
            self._features = None
            self._feature_dict = None
            return
        if not isinstance(features, list):
            raise TypeError('features must be a list of features. Got {}'.format(type(features)))
        for entry in features:
            if isinstance(entry, Feature):
                self.add_feature(entry)
            elif isinstance(entry, dict):
                # deserialize and append
                self.add_feature(Feature.from_dict(entry))
            else:
                raise TypeError(
                    'Entries of features are required to be instances of Feature or '
                    'dictionary to be deserialized. Got {}'.format(type(entry)))

    def get_integer_index(self, feature_id):
        """
        Gets the integer index for the given feature id.

        Parameters
        ----------
        feature_id : str

        Returns
        -------
        int
        """
        return self._feature_dict[feature_id]

    def _rebuild_feature_dict(self):
        # regenerate the uid -> index mapping from scratch
        self._feature_dict = {}
        for i, entry in enumerate(self._features):
            self._feature_dict[entry.uid] = i

    @classmethod
    def from_dict(cls, the_json):
        """
        Deserialize from a geojson feature collection dictionary.

        Parameters
        ----------
        the_json : Dict

        Returns
        -------
        FeatureCollection
        """
        typ = the_json['type']
        if typ != cls._type:
            raise ValueError('FeatureCollection cannot be constructed from {}'.format(the_json))
        return cls(features=the_json['features'])

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json-compatible dictionary.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        if self._features is None:
            parent_dict['features'] = None
        else:
            parent_dict['features'] = [entry.to_dict() for entry in self._features]
        return parent_dict

    def add_feature(self, feature):
        """
        Add a feature.

        Parameters
        ----------
        feature : Feature

        Returns
        -------
        None
        """
        if not isinstance(feature, Feature):
            raise TypeError('This requires a Feature instance, got {}'.format(type(feature)))
        if self._features is None:
            self._feature_dict = {feature.uid: 0}
            self._features = [feature, ]
        else:
            self._feature_dict[feature.uid] = len(self._features)
            self._features.append(feature)

    def export_to_kml(self, file_name, coord_transform=None, **params):
        """
        Export to a kml document. **Note that underlying geometry coordinates or
        transformed coordinates are assumed in longitude, latitude order.**
        Currently only the first two (i.e. longitude and latitude) are used in this export.

        Parameters
        ----------
        file_name : str|zipfile.ZipFile|file like
        coord_transform : None|callable
            The coordinate transform function.
        params : dict

        Returns
        -------
        None
        """
        from sarpy.io.kml import Document as KML_Document

        with KML_Document(file_name=file_name, **params) as doc:
            if self.features is not None:
                for feat in self.features:
                    feat.add_to_kml(doc, coord_transform)

    def replicate(self):
        """
        Make a replica of the collection, in which every feature receives a
        newly generated uid.

        Returns
        -------
        FeatureCollection
        """
        features = [feat.replicate() for feat in self.features]
        the_type = self.__class__
        return the_type(features=features)
class Geometry(Jsonable):
    """
    Abstract Geometry base class.
    """

    _type = 'Geometry'
    _is_collection = False

    @property
    def is_collection(self):
        """
        bool: Is this a collection object?
        """
        return self._is_collection

    @classmethod
    def from_dict(cls, geometry):
        """
        Deserialize the appropriate Geometry subclass from json.

        Parameters
        ----------
        geometry : Dict

        Returns
        -------
        Geometry
        """
        if geometry['type'] == 'GeometryCollection':
            return GeometryCollection.from_dict(geometry)
        return GeometryObject.from_dict(geometry)

    def to_dict(self, parent_dict=None):
        # abstract - implemented by each concrete subclass
        raise NotImplementedError

    def add_to_kml(self, doc, parent, coord_transform):
        """
        Add the geometry to the kml document. **Note that coordinates or transformed
        coordinates are assumed in longitude, latitude order.**

        Parameters
        ----------
        doc : sarpy.io.kml.Document
        parent : xml.dom.minidom.Element
        coord_transform : None|callable

        Returns
        -------
        None
        """
        raise NotImplementedError

    def apply_projection(self, proj_method):
        """
        Gets a new version after applying a transform method.

        Parameters
        ----------
        proj_method : callable

        Returns
        -------
        Geometry
        """
        raise NotImplementedError

    def get_bbox(self):
        """
        Get the bounding box list.

        Returns
        -------
        None|List
            Of the form [min coord 0, min coord 1, ..., max coord 0, max coord 1, ...]/
        """
        raise NotImplementedError
class GeometryCollection(Geometry):
    """
    Geometry collection - following the geojson structure
    """

    __slots__ = ('_geometries', )
    _type = 'GeometryCollection'
    _is_collection = True

    def __init__(self, geometries=None):
        """
        Parameters
        ----------
        geometries : None|List[Geometry]
        """
        self._geometries = []
        if geometries is not None:
            self.geometries = geometries

    @property
    def collection(self):
        # alias used by generic collection handling
        return self.geometries

    @property
    def geometries(self):
        # type: () -> List[Geometry]
        """
        List[Geometry]: The geometry collection.
        """
        return self._geometries

    @geometries.setter
    def geometries(self, geometries):
        if geometries is None:
            self._geometries = []
            return
        elif not isinstance(geometries, list):
            raise TypeError(
                'geometries must be None or a list of Geometry objects. Got type {}'.format(type(geometries)))
        elif len(geometries) < 2:
            logger.warning('GeometryCollection should contain a list of geometries with length greater than 1.')
        self._geometries = []
        for entry in geometries:
            if isinstance(entry, dict):
                # deserialize from the geojson dictionary form
                entry = Geometry.from_dict(entry)
            if not isinstance(entry, Geometry):
                raise TypeError(
                    'geometries must be a list of Geometry objects. Got an element of type {}'.format(type(entry)))
            self._geometries.append(entry)

    def get_bbox(self):
        """
        Get the bounding box list, as the union of the constituent bounding
        boxes (up to three coordinates).

        Returns
        -------
        None|List
        """
        if self._geometries is None:
            return None
        mins = [None, None, None]
        maxs = [None, None, None]
        for geometry in self.geometries:
            t_bbox = geometry.get_bbox()
            coord_count = int(len(t_bbox)/2)
            for i in range(min(coord_count, 3)):
                # fold this geometry's extrema into the running min/max
                entry = t_bbox[i]
                if mins[i] is None or entry < mins[i]:
                    mins[i] = entry
                entry = t_bbox[coord_count+i]
                if maxs[i] is None or entry > maxs[i]:
                    maxs[i] = entry
        if mins[2] is None:
            # no constituent provided a third coordinate - trim to two
            mins = mins[:2]
            maxs = maxs[:2]
        mins.extend(maxs)
        return mins

    @classmethod
    def from_dict(cls, geometry):
        # type: (Union[None, Dict]) -> GeometryCollection
        typ = geometry.get('type', None)
        if typ != cls._type:
            raise ValueError('GeometryCollection cannot be constructed from {}'.format(geometry))
        geometries = []
        if geometry['geometries'] is not None:
            for entry in geometry['geometries']:
                if isinstance(entry, Geometry):
                    geometries.append(entry)
                elif isinstance(entry, dict):
                    geometries.append(Geometry.from_dict(entry))
                else:
                    raise TypeError(
                        'The geometries attribute must contain either a Geometry or json serialization of a Geometry. '
                        'Got an entry of type {}'.format(type(entry)))
        return cls(geometries)

    def to_dict(self, parent_dict=None):
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        parent_dict['geometries'] = [entry.to_dict() for entry in self.geometries]
        return parent_dict

    def add_to_kml(self, doc, parent, coord_transform):
        if self.geometries is None:
            return
        # render as a kml MultiGeometry containing each constituent
        multigeometry = doc.add_multi_geometry(parent)
        for geometry in self.geometries:
            if geometry is not None:
                geometry.add_to_kml(doc, multigeometry, coord_transform)

    def apply_projection(self, proj_method):
        """
        Gets a new version after applying a transform method.

        Parameters
        ----------
        proj_method : callable

        Returns
        -------
        GeometryObject
        """
        if self.geometries is None:
            return GeometryCollection()
        return GeometryCollection(geometries=[geom.apply_projection(proj_method) for geom in self.geometries])

    @classmethod
    def assemble_from_collection(cls, *args):
        """
        Assemble a geometry collection from the input constituents.

        Parameters
        ----------
        args
            A list of input GeometryObjects

        Returns
        -------
        GeometryCollection
        """

        def handle_arg(arg_in):
            # recursively flatten any nested collections into basic geometries
            if isinstance(arg_in, (Point, LineString, Polygon)):
                geometries.append(arg_in)
            elif arg_in.is_collection:
                for entry in arg_in.collection:
                    handle_arg(entry)
            else:
                # report the offending element itself - previously this
                # formatted the outer loop variable, which misidentified the
                # problem entry for nested collections
                raise ValueError('Got unhandled argument type `{}`'.format(type(arg_in)))

        if len(args) == 0:
            return cls()

        geometries = []
        for arg in args:
            handle_arg(arg)
        return cls(geometries=geometries)
class GeometryObject(Geometry):
    """
    Abstract geometry object class - mirrors basic geojson functionality
    """
    _type = 'Geometry'

    def get_coordinate_list(self):
        """
        The geojson style coordinate list.

        Returns
        -------
        List
        """
        raise NotImplementedError

    def get_bbox(self):
        raise NotImplementedError

    @classmethod
    def from_dict(cls, geometry):
        # type: (Dict) -> GeometryObject
        """
        Construct the appropriate concrete geometry from its geojson dictionary,
        dispatching on the 'type' entry.
        """
        typ = geometry.get('type', None)
        if typ is None:
            raise ValueError('Poorly formed json for GeometryObject {}'.format(geometry))
        dispatch = {
            'Point': Point,
            'MultiPoint': MultiPoint,
            'LineString': LineString,
            'MultiLineString': MultiLineString,
            'Polygon': Polygon,
            'MultiPolygon': MultiPolygon}
        if typ in dispatch:
            return dispatch[typ](coordinates=geometry['coordinates'])
        raise ValueError('Unknown type {} for GeometryObject from json {}'.format(typ, geometry))

    def to_dict(self, parent_dict=None):
        # serialize to geojson-style dict, reusing parent_dict when supplied
        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        out['coordinates'] = self.get_coordinate_list()
        return out

    def add_to_kml(self, doc, parent, coord_transform):
        raise NotImplementedError

    def apply_projection(self, proj_method):
        """
        Gets a new version after applying a transform method.

        Parameters
        ----------
        proj_method : callable

        Returns
        -------
        GeometryObject
        """
        raise NotImplementedError

    def get_minimum_distance(self, point):
        """
        Get the minimum distance from the point, to the point or line segments of
        the given geometrical shape. This just assumes two-dimensional coordinates.

        Parameters
        ----------
        point : Point|numpy.ndarray|tuple|list

        Returns
        -------
        float
        """
        raise NotImplementedError
class Point(GeometryObject):
    """
    A geometric point.
    """
    __slots__ = ('_coordinates', )
    _type = 'Point'

    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|numpy.ndarray|List[float]|Point
        """
        self._coordinates = None
        if coordinates is not None:
            self.coordinates = coordinates

    @property
    def coordinates(self):
        """
        numpy.ndarray: The coordinate array.
        """
        return self._coordinates

    @coordinates.setter
    def coordinates(self, coordinates):
        # type: (Union[None, List, Tuple, numpy.ndarray]) -> None
        if coordinates is None:
            self._coordinates = None
            return
        array = coordinates if isinstance(coordinates, numpy.ndarray) else \
            numpy.array(coordinates, dtype=numpy.float64)
        if array.ndim != 1:
            raise ValueError(
                'coordinates must be a one-dimensional array. Got shape {}'.format(array.shape))
        if not (2 <= array.size <= 4):
            raise ValueError(
                'coordinates must have between 2 and 4 entries. Got shape {}'.format(array.shape))
        self._coordinates = array

    def get_bbox(self):
        # a point's bounding box is the coordinates repeated as mins and maxs
        if self._coordinates is None:
            return None
        return self._coordinates.tolist() + self._coordinates.tolist()

    def get_coordinate_list(self):
        return None if self._coordinates is None else self._coordinates.tolist()

    @classmethod
    def from_dict(cls, geometry):
        # type: (Dict) -> Point
        if geometry.get('type', None) != cls._type:
            raise ValueError(_poorly_formed_text.format(geometry))
        return cls(coordinates=geometry['coordinates'])

    def add_to_kml(self, doc, parent, coord_transform):
        if self.coordinates is None:
            return
        doc.add_point(_get_kml_coordinate_string(self.coordinates, coord_transform), par=parent)

    def apply_projection(self, proj_method):
        # type: (callable) -> Point
        return Point(coordinates=proj_method(self._coordinates))

    def get_minimum_distance(self, point):
        # two-dimensional euclidean distance to the given point
        if self._coordinates is None:
            return None
        other = _validate_point_array(point)
        delta = self.coordinates[:2] - other[:2]
        return float(numpy.linalg.norm(delta))
class MultiPoint(GeometryObject):
    """
    A collection of geometric points.
    """
    _type = 'MultiPoint'
    __slots__ = ('_points', )
    _is_collection = True

    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|numpy.ndarray|List[float]|List[Point]|MultiPoint
        """
        self._points = None
        if isinstance(coordinates, MultiPoint):
            coordinates = coordinates.get_coordinate_list()
        if coordinates is not None:
            self.points = coordinates

    @property
    def collection(self):
        return self.points

    @property
    def points(self):
        # type: () -> List[Point]
        """
        List[Point]: The point collection.
        """
        return self._points

    @points.setter
    def points(self, points):
        if points is None:
            self._points = None
            # BUGFIX: previously fell through here, so assigning None set _points
            # and then raised TypeError; sibling MultiLineString.lines returns.
            return
        if isinstance(points, numpy.ndarray):
            points = points.tolist()
        if not isinstance(points, list):
            raise TypeError(
                'Multipoint requires that points is None or a list of points. '
                'Got type {}'.format(type(points)))
        self._points = [
            entry if isinstance(entry, Point) else Point(coordinates=entry)
            for entry in points]

    def get_bbox(self):
        """
        Overall bounding box of the form [mins..., maxs...], sized by the
        largest coordinate dimension among member points.
        """
        if self._points is None:
            return None
        # create our output space
        siz = max(point.coordinates.size for point in self.points)
        mins = [None, ]*siz
        maxs = [None, ]*siz
        for element in self.get_coordinate_list():
            for i, entry in enumerate(element):
                if mins[i] is None or (entry < mins[i]):
                    mins[i] = entry
                if maxs[i] is None or (entry > maxs[i]):
                    maxs[i] = entry
        mins.extend(maxs)
        return mins

    def get_coordinate_list(self):
        if self._points is None:
            return None
        return [point.get_coordinate_list() for point in self._points]

    @classmethod
    def from_dict(cls, geometry):
        # type: (Dict) -> MultiPoint
        if not geometry.get('type', None) == cls._type:
            raise ValueError(_poorly_formed_text.format(geometry))
        return cls(coordinates=geometry['coordinates'])

    def add_to_kml(self, doc, parent, coord_transform):
        # serialize as a kml MultiGeometry of points
        if self._points is None:
            return
        multigeometry = doc.add_multi_geometry(parent)
        for geometry in self._points:
            if geometry is not None:
                geometry.add_to_kml(doc, multigeometry, coord_transform)

    def apply_projection(self, proj_method):
        # type: (callable) -> MultiPoint
        return MultiPoint(coordinates=[pt.apply_projection(proj_method) for pt in self.points])

    def get_minimum_distance(self, point):
        # minimum over member points; infinite for an empty collection
        if self._points is None:
            return float('inf')
        return min(entry.get_minimum_distance(point) for entry in self.points)

    @classmethod
    def assemble_from_collection(cls, *args):
        """
        Assemble a multipoint collection from input constituents.

        Parameters
        ----------
        args
            A list of input Point and MultiPoint objects.

        Returns
        -------
        MultiPoint
        """

        def handle_arg(arg_in):
            # flatten nested collections into individual points
            if isinstance(arg_in, Point):
                points.append(arg_in)
            elif isinstance(arg_in, MultiPoint):
                points.extend(arg_in.points)
            elif isinstance(arg_in, GeometryCollection):
                for entry in arg_in.geometries:
                    handle_arg(entry)
            else:
                raise ValueError(_disallowed_text.format(type(arg_in)))

        if len(args) == 0:
            return cls()
        points = []
        for arg in args:
            handle_arg(arg)
        return cls(points)
class LineString(GeometryObject):
    """
    A geometric line.
    """
    __slots__ = ('_coordinates', )
    _type = 'LineString'
    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|numpy.ndarray|List[List[float]]|LineString|LinearRing
        """
        self._coordinates = None
        # a LineString/LinearRing input is unpacked to its raw coordinate list
        if isinstance(coordinates, (LineString, LinearRing)):
            coordinates = coordinates.get_coordinate_list()
        if coordinates is not None:
            self.coordinates = coordinates
    @property
    def coordinates(self):
        # type: () -> numpy.ndarray
        """
        numpy.ndarray: The coordinate array.
        """
        return self._coordinates
    @coordinates.setter
    def coordinates(self, coordinates):
        # type: (Union[None, List, Tuple, numpy.ndarray]) -> None
        # Validates shape (N, 2..4); degenerate inputs are logged, not rejected.
        if coordinates is None:
            self._coordinates = None
            return
        if not isinstance(coordinates, numpy.ndarray):
            coordinates = numpy.array(coordinates, dtype=numpy.float64)
        if coordinates.ndim != 2:
            raise ValueError(
                'coordinates must be a two-dimensional array. '
                'Got shape {}'.format(coordinates.shape))
        if not (2 <= coordinates.shape[1] <= 4):
            raise ValueError(
                'The second dimension of coordinates must have between 2 and 4 entries. '
                'Got shape {}'.format(coordinates.shape))
        if coordinates.shape[0] < 2:
            logger.info(
                'LineString coordinates should consist of at least 2 points.\n\t'
                'Got shape {}'.format(coordinates.shape))
        # drop consecutive repeated points before storing
        coordinates = _compress_identical(coordinates)
        if coordinates.shape[0] < 2:
            logger.info(
                'coordinates should consist of at least 2 points after\n\t'
                'suppressing consecutive repeated points.\n\t'
                'Got shape {}'.format(coordinates.shape))
        self._coordinates = coordinates
    def self_intersection(self):
        """
        Does this self intersect?
        Returns
        -------
        bool
        """
        # pairwise test of segment pairs; fewer than 4 points cannot self-intersect
        if self.coordinates.shape[0] <= 3:
            return False
        for i in range(self.coordinates.shape[0] - 3):
            for j in range(i+1, self.coordinates.shape[0] - 1):
                result = _line_segments_intersect(
                    self.coordinates[i, :], self.coordinates[i+1, :], self.coordinates[j, :], self.coordinates[j+1, :])
                if result:
                    return True
        return False
    def get_bbox(self):
        # bounding box of the form [mins..., maxs...] over all coordinate dimensions
        if self._coordinates is None:
            return None
        mins = numpy.min(self.coordinates, axis=0)
        maxs = numpy.max(self.coordinates, axis=0)
        min_list = mins.tolist()
        max_list = maxs.tolist()
        assert(isinstance(min_list, list))
        assert (isinstance(max_list, list))
        min_list.extend(max_list)
        return min_list
    def get_coordinate_list(self):
        if self._coordinates is None:
            return None
        else:
            return self._coordinates.tolist()
    @classmethod
    def from_dict(cls, geometry):
        # type: (dict) -> LineString
        if not geometry.get('type', None) == cls._type:
            raise ValueError(_poorly_formed_text.format(geometry))
        return cls(coordinates=geometry['coordinates'])
    def get_length(self):
        """
        Gets the length of the line.
        Returns
        -------
        None|float
        """
        if self._coordinates is None:
            return None
        # NB: length is computed from the first two coordinates of each point only
        diffs = self._coordinates[1:, :] - self._coordinates[:-1, :]
        return float(numpy.sum(numpy.sqrt(diffs[:, 0]*diffs[:, 0] + diffs[:, 1]*diffs[:, 1])))
    def add_to_kml(self, doc, parent, coord_transform):
        # serialize as a kml LineString element under the given parent
        if self.coordinates is None:
            return
        doc.add_line_string(_get_kml_coordinate_string(self.coordinates, coord_transform), par=parent)
    def apply_projection(self, proj_method):
        # type: (callable) -> LineString
        return LineString(coordinates=proj_method(self.coordinates))
    def get_minimum_distance(self, point):
        # minimum two-dimensional distance from the point to any segment of the line
        if self._coordinates is None:
            return float('inf')
        p_coord = _validate_point_array(point)[:2]
        if self._coordinates.shape[0] == 1:
            return float(numpy.linalg.norm(self._coordinates[0, :] - p_coord))
        elif self._coordinates.shape[0] == 2:
            return _line_segment_distance(self._coordinates[:, :2], p_coord)
        else:
            return min(
                _line_segment_distance(self._coordinates[i:i+2, :2], p_coord)
                for i in range(self._coordinates.shape[0]-1))
class MultiLineString(GeometryObject):
    """
    A collection of geometric lines.
    """
    __slots__ = ('_lines', )
    _type = 'MultiLineString'
    _is_collection = True

    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|List[numpy.ndarray]|List[List[List[float]]]|List[LineString]|MultiLineString
        """
        self._lines = None
        if isinstance(coordinates, MultiLineString):
            coordinates = coordinates.get_coordinate_list()
        if coordinates is not None:
            self.lines = coordinates

    @property
    def collection(self):
        return self.lines

    @property
    def lines(self):
        # type: () -> List[LineString]
        """
        List[LineString]: The line collection.
        """
        return self._lines

    @lines.setter
    def lines(self, lines):
        if lines is None:
            self._lines = None
            return
        if not isinstance(lines, list):
            raise TypeError(
                'MultiLineString requires that lines is None or a list of LineStrings. '
                'Got type {}'.format(type(lines)))
        self._lines = [
            entry if isinstance(entry, LineString) else LineString(coordinates=entry)
            for entry in lines]

    def get_bbox(self):
        """
        Overall bounding box [mins..., maxs...], sized by the largest coordinate
        dimension among member lines.
        """
        if self._lines is None:
            return None
        dims = max(line.coordinates.shape[1] for line in self.lines)
        low = [None, ]*dims
        high = [None, ]*dims
        for line in self.lines:
            child_box = line.get_bbox()
            half = len(child_box)//2
            for i in range(half):
                low_val = child_box[i]
                high_val = child_box[half + i]
                if low[i] is None or low_val < low[i]:
                    low[i] = low_val
                if high[i] is None or high_val > high[i]:
                    high[i] = high_val
        return low + high

    def get_coordinate_list(self):
        if self._lines is None:
            return None
        return [line.get_coordinate_list() for line in self._lines]

    @classmethod
    def from_dict(cls, geometry):
        # type: (Dict) -> MultiLineString
        if geometry.get('type', None) != cls._type:
            raise ValueError(_poorly_formed_text.format(geometry))
        return cls(coordinates=geometry['coordinates'])

    def get_length(self):
        """
        Gets the length of the lines.

        Returns
        -------
        None|float
        """
        if self._lines is None:
            return None
        return sum(entry.get_length() for entry in self._lines)

    def add_to_kml(self, doc, parent, coord_transform):
        # serialize as a kml MultiGeometry of line strings
        if self._lines is None:
            return
        container = doc.add_multi_geometry(parent)
        for line in self._lines:
            if line is not None:
                line.add_to_kml(doc, container, coord_transform)

    def apply_projection(self, proj_method):
        # type: (callable) -> MultiLineString
        return MultiLineString(coordinates=[line.apply_projection(proj_method) for line in self.lines])

    def get_minimum_distance(self, point):
        # minimum over member lines; infinite for an empty collection
        if self._lines is None:
            return float('inf')
        return min(entry.get_minimum_distance(point) for entry in self.lines)

    @classmethod
    def assemble_from_collection(cls, *args):
        """
        Assemble a multiline collection from input constituents.

        Parameters
        ----------
        args
            A list of input LineString and MultiLineString objects.

        Returns
        -------
        MultiLineString
        """

        def flatten(element):
            if isinstance(element, LineString):
                lines.append(element)
            elif isinstance(element, MultiLineString):
                lines.extend(element.lines)
            elif isinstance(element, GeometryCollection):
                for entry in element.geometries:
                    flatten(entry)
            else:
                raise ValueError(_disallowed_text.format(type(element)))

        if len(args) == 0:
            return cls()
        lines = []
        for arg in args:
            flatten(arg)
        return cls(lines)
class LinearRing(LineString):
    """
    This is not directly a valid geojson member, but plays the role of a single
    polygonal element, and is only used as a Polygon constituent.
    """
    __slots__ = ('_coordinates', '_diffs', '_bounding_box', '_segmentation', '_orientation')
    _type = 'LinearRing'

    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|numpy.ndarray|List[List[float]]|LinearRing|LineString
        """
        self._coordinates = None
        self._diffs = None  # per-segment coordinate differences, cached with coordinates
        self._bounding_box = None
        self._segmentation = None  # x/y segment decomposition used for containment testing
        self._orientation = 1
        if isinstance(coordinates, (LineString, LinearRing)):
            coordinates = coordinates.get_coordinate_list()
        super(LinearRing, self).__init__(coordinates)

    def get_coordinate_list(self):
        if self._coordinates is None:
            return None
        else:
            return self._coordinates.tolist()

    def reverse_orientation(self):
        """
        Reverse the point ordering of the ring, flipping its orientation.
        """
        if self._coordinates is None:
            return
        # BUGFIX: assigning through the coordinates property (set_coordinates)
        # already recomputes _orientation from the signed area of the reversed
        # coordinates; the previous additional `self._orientation *= -1` re-flipped
        # it back to the stale value, leaving the orientation attribute
        # inconsistent with the actual point ordering.
        self.coordinates = self._coordinates[::-1, :]

    @property
    def orientation(self):
        """
        int: +1 for positive orientation (counter-clockwise) and -1 for negative orientation (clockwise).
        """
        return self._orientation

    @property
    def bounding_box(self):
        """
        The bounding box of the form [[x_min, x_max], [y_min, y_max]].
        *Note that would be extremely misleading for a naively constructed
        lat/lon polygon crossing the boundary of discontinuity and/or surrounding a pole.*

        Returns
        -------
        numpy.ndarray
        """
        return self._bounding_box

    def get_perimeter(self):
        """
        Gets the perimeter of the linear ring.

        Returns
        -------
        float
        """
        return self.get_length()

    def get_area(self):
        """
        Gets the area of the polygon. If a polygon is self-intersecting, then this
        result may be pathological. A positive value represents a polygon with positive
        orientation, while a negative value represents a polygon with negative orientation.

        Returns
        -------
        float
        """
        # shoelace formula over the closed coordinate loop
        return float(
            0.5*numpy.sum(self._coordinates[:-1, 0]*self._coordinates[1:, 1] -
                          self._coordinates[1:, 0]*self._coordinates[:-1, 1]))

    def get_centroid(self):
        """
        Gets the centroid of the polygon - note that this may not actually lie in
        the polygon interior for non-convex polygon. This will result in an undefined value
        if the polygon is degenerate.

        Returns
        -------
        numpy.ndarray
        """
        arr = self._coordinates[:-1, 0]*self._coordinates[1:, 1] - \
            self._coordinates[1:, 0]*self._coordinates[:-1, 1]
        area = 0.5*numpy.sum(arr)  # signed area
        x = numpy.sum(0.5*(self._coordinates[:-1, 0] + self._coordinates[1:, 0])*arr)
        y = numpy.sum(0.5*(self._coordinates[:-1, 1] + self._coordinates[1:, 1])*arr)
        return numpy.array([x, y], dtype=numpy.float64)/(3*area)

    @property
    def coordinates(self):
        """
        Gets the coordinates array.

        Returns
        -------
        numpy.ndarray
        """
        return self._coordinates

    @coordinates.setter
    def coordinates(self, coordinates):
        self.set_coordinates(coordinates)

    def set_coordinates(self, coordinates):
        """
        Set the coordinate array, explicitly closing the ring and refreshing the
        cached bounding box, segment differences, segmentation and orientation.
        """
        if coordinates is None:
            self._coordinates = None
            self._bounding_box = None
            self._segmentation = None
            self._diffs = None
            return
        if not isinstance(coordinates, numpy.ndarray):
            # noinspection PyTypeChecker
            coordinates = numpy.array(coordinates, dtype=numpy.float64)
        if len(coordinates.shape) != 2:
            raise ValueError(
                'coordinates must be two-dimensional. Got shape {}'.format(coordinates.shape))
        if not (2 <= coordinates.shape[1] <= 4):
            raise ValueError('The second dimension of coordinates must have between 2 and 4 entries. '
                             'Got shape {}'.format(coordinates.shape))
        if coordinates.shape[0] < 3:
            logger.info(
                'coordinates must consist of at least 3 points.\n\t'
                'Got shape {}'.format(coordinates.shape))
        coordinates = _compress_identical(coordinates)
        # ensure that the ring is explicitly closed
        if (coordinates[0, 0] != coordinates[-1, 0]) or \
                (coordinates[0, 1] != coordinates[-1, 1]):
            coordinates = numpy.vstack((coordinates, coordinates[0, :]))
        if coordinates.shape[0] < 4:
            logger.info(
                'After compressing repeated (in sequence) points and\n\t'
                'ensuring first and last point are the same,\n\t'
                'coordinates must contain at least 4 points.\n\t'
                'Got shape {}'.format(coordinates.shape))
        self._coordinates = coordinates
        # construct bounding box
        self._bounding_box = numpy.empty((2, 2), dtype=coordinates.dtype)
        self._bounding_box[0, :] = (numpy.min(coordinates[:, 0]), numpy.max(coordinates[:, 0]))
        self._bounding_box[1, :] = (numpy.min(coordinates[:, 1]), numpy.max(coordinates[:, 1]))
        # construct diffs
        self._diffs = coordinates[1:, :] - coordinates[:-1, :]
        self._segmentation = {
            'x': self._construct_segmentation(coordinates[:, 0], coordinates[:, 1]),
            'y': self._construct_segmentation(coordinates[:, 1], coordinates[:, 0])}
        signed_area = self.get_area()
        if signed_area >= 0:
            self._orientation = 1
        else:
            self._orientation = -1

    @staticmethod
    def _construct_segmentation(coords, o_coords):
        """
        Helper method. Decompose the given coordinate dimension into monotone
        segments, recording for each segment which edges overlap it and the
        min/max of the other coordinate over those edges - used to accelerate
        containment testing.
        """
        def overlap(fst, lst, segment):
            if fst == lst and fst == segment['min']:
                return 1  # contained
            if fst >= segment['max']:
                return 0  # above the segment
            if lst <= segment['min']:
                return 2  # below the segment
            return 1  # contained

        def do_min_val_value(segment, val1, val2):
            segment['min_value'] = min(val1, val2, segment['min_value'])
            segment['max_value'] = max(val1, val2, segment['max_value'])

        if len(coords) == 1:
            return (
                {'min': coords[0], 'max': coords[0], 'inds': [0, ],
                 'min_value': numpy.inf, 'max_value': -numpy.inf}, )
        inds = numpy.argsort(coords[:-1])
        segments = []
        beg_val = coords[inds[0]]
        val = None
        for ind in inds[1:]:
            val = coords[ind]
            if val > beg_val:  # make a new segment
                segments.append(
                    {'min': beg_val, 'max': val, 'inds': [],
                     'min_value': numpy.inf, 'max_value': -numpy.inf})
                beg_val = val
        else:
            # it may have ended without appending the segment
            if val > beg_val:
                segments.append(
                    {'min': beg_val, 'max': val, 'inds': [],
                     'min_value': numpy.inf, 'max_value': -numpy.inf})
        del beg_val, val
        # order our segments based on smallest value in the given dimension, for fast analysis
        this_sides = []
        for i, (beg_value, end_value, ocoord1, ocoord2) in \
                enumerate(zip(coords[:-1], coords[1:], o_coords[:-1], o_coords[1:])):
            first, last = (beg_value, end_value) if beg_value <= end_value else (end_value, beg_value)
            this_sides.append((first, last, i, ocoord1, ocoord2))
        # now, let's populate the inds lists and min/max_values elements for the segmentation
        start_segment = 0
        for entry in sorted(this_sides, key=lambda x: x[0]):
            for j in range(start_segment, len(segments)):
                seg = segments[j]
                overlap_state = overlap(entry[0], entry[1], seg)
                if overlap_state == 0:
                    start_segment += 1
                elif overlap_state == 1:
                    seg['inds'].append(entry[2])
                    do_min_val_value(seg, entry[3], entry[4])
                else:
                    break
        return tuple(segments)

    def _contained_segment_data(self, x, y):
        """
        This is a helper function for the polygon containment effort.
        This determines whether the x or y segmentation should be utilized, and
        the details for doing so.

        Parameters
        ----------
        x : numpy.ndarray
        y : numpy.ndarray

        Returns
        -------
        (int|None, int|None, str)
            the segment index start (inclusive), the segment index end (exclusive),
            and "x" or "y" for which segmentation is better.
        """
        def segment(coord, segments):
            tmin = coord.min()
            tmax = coord.max()
            if tmax < segments[0]['min'] or tmin > segments[-1]['max']:
                return None, None
            if len(segments) == 1:
                return 0, 1
            t_first_ind = None if tmin > segments[0]['max'] else 0
            t_last_ind = None if tmax < segments[-1]['min'] else len(segments)
            for i, seg in enumerate(segments):
                if seg['min'] <= tmin < seg['max']:
                    t_first_ind = i
                if seg['min'] <= tmax <= seg['max']:
                    t_last_ind = i+1
                if t_first_ind is not None and t_last_ind is not None:
                    break
            return t_first_ind, t_last_ind

        # let's determine first/last x & y segments and which is better (fewer)
        x_first_ind, x_last_ind = segment(x, self._segmentation['x'])
        if x_first_ind is None:
            return None, None, 'x'
        y_first_ind, y_last_ind = segment(y, self._segmentation['y'])
        if y_first_ind is None:
            return None, None, 'y'
        if (y_last_ind - y_first_ind) <= (x_last_ind - x_first_ind):
            return y_first_ind, y_last_ind, 'y'
        return x_first_ind, x_last_ind, 'x'

    def _contained_do_segment(self, x, y, segment, direction):
        """
        Helper function for polygon containment effort.

        Parameters
        ----------
        x : numpy.ndarray
        y : numpy.ndarray
        segment : dict
        direction : str

        Returns
        -------
        numpy.ndarray
        """
        # we require that all these points are relevant to this slice
        in_poly = numpy.zeros(x.shape, dtype='bool')
        crossing_counts = numpy.zeros(x.shape, dtype=numpy.int32)
        indices = segment['inds']
        orient = self.orientation
        for i in indices:
            if direction == 'x' and self._coordinates[i, 0] == self._coordinates[i+1, 0]:
                # we are segmented horizontally and processing vertically.
                # This is a vertical line - only consider inclusion.
                y_min = min(self._coordinates[i, 1], self._coordinates[i+1, 1])
                y_max = max(self._coordinates[i, 1], self._coordinates[i+1, 1])
                # points on the edge are included
                in_poly[(x == self._coordinates[i, 0]) & (y_min <= y) & (y <= y_max)] = True
            elif direction == 'y' and self._coordinates[i, 1] == self._coordinates[i+1, 1]:
                # we are segmented vertically and processing horizontally.
                # This is a horizontal line - only consider inclusion.
                x_min = min(self._coordinates[i, 0], self._coordinates[i+1, 0])
                x_max = max(self._coordinates[i, 0], self._coordinates[i+1, 0])
                # points on the edge are included
                in_poly[(y == self._coordinates[i, 1]) & (x_min <= x) & (x <= x_max)] = True
            else:
                nx, ny = self._diffs[i, 1], -self._diffs[i, 0]
                crossing = orient*((x - self._coordinates[i, 0])*nx + (y - self._coordinates[i, 1])*ny)
                # dot product of vector connecting (x, y) to segment vertex with normal vector of segment
                crossing_counts[crossing > 0] += 1  # positive crossing number
                crossing_counts[crossing < 0] -= 1  # negative crossing number
                # points on the edge are included
                in_poly[(crossing == 0)] = True
        in_poly |= (crossing_counts != 0)
        return in_poly

    def _contained(self, x, y):
        """
        Helper method for polygon inclusion.

        Parameters
        ----------
        x : numpy.ndarray
        y : numpy.ndarray

        Returns
        -------
        numpy.ndarray
        """
        out = numpy.zeros(x.shape, dtype='bool')
        ind_beg, ind_end, direction = self._contained_segment_data(x, y)
        if ind_beg is None:
            return out  # it missed the whole bounding box
        for index in range(ind_beg, ind_end):
            if direction == 'x':
                seg = self._segmentation['x'][index]
                mask = ((x >= seg['min']) & (x <= seg['max']) & (y >= seg['min_value']) & (y <= seg['max_value']))
            else:
                seg = self._segmentation['y'][index]
                mask = ((y >= seg['min']) & (y <= seg['max']) & (x >= seg['min_value']) & (x <= seg['max_value']))
            if numpy.any(mask):
                out[mask] = self._contained_do_segment(x[mask], y[mask], seg, direction)
        return out

    def contain_coordinates(self, pts_x, pts_y, block_size=None):
        """
        Determines inclusion of the given points in the interior of the polygon.
        The methodology here is based on the Jordan curve theorem approach.

        ** Warning - This method may provide erroneous results for a lat/lon polygon
        crossing the bound of discontinuity and/or surrounding a pole.**

        Note - If the points constitute an x/y grid, then the grid contained method will
        be much more performant.

        Parameters
        ----------
        pts_x : numpy.ndarray|list|tuple|float|int
        pts_y : numpy.ndarray|list|tuple|float|int
        block_size : None|int
            If provided, processing block size. The minimum value used will be
            50,000.

        Returns
        -------
        numpy.ndarray|bool
            boolean array indicating inclusion.
        """
        pts_x, pts_y = _validate_contain_arguments(pts_x, pts_y)
        o_shape = pts_x.shape
        if len(o_shape) == 0:
            pts_x = numpy.reshape(pts_x, (1, ))
            pts_y = numpy.reshape(pts_y, (1, ))
        else:
            pts_x = numpy.reshape(pts_x, (-1, ))
            pts_y = numpy.reshape(pts_y, (-1, ))
        if block_size is not None:
            block_size = int(block_size)
            block_size = max(50000, block_size)
        if block_size is None or pts_x.size <= block_size:
            in_poly = self._contained(pts_x, pts_y)
        else:
            in_poly = numpy.zeros(pts_x.shape, dtype='bool')
            start_block = 0
            while start_block < pts_x.size:
                end_block = min(start_block+block_size, pts_x.size)
                in_poly[start_block:end_block] = self._contained(
                    pts_x[start_block:end_block], pts_y[start_block:end_block])
                start_block = end_block
        if len(o_shape) == 0:
            return in_poly[0]
        else:
            return numpy.reshape(in_poly, o_shape)

    def grid_contained(self, grid_x, grid_y):
        """
        Determines inclusion of a coordinate grid inside the polygon. The coordinate
        grid is defined by the two one-dimensional coordinate arrays `grid_x` and `grid_y`.

        Parameters
        ----------
        grid_x : numpy.ndarray
        grid_y : numpy.ndarray

        Returns
        -------
        numpy.ndarray
            boolean mask for point inclusion of the grid. Output is of shape
            `(grid_x.size, grid_y.size)`.
        """
        grid_x, grid_y = _validate_grid_contain_arguments(grid_x, grid_y)
        out = numpy.zeros((grid_x.size, grid_y.size), dtype='bool')
        if self._coordinates.shape[0] < 4:
            # this is a degenerate linear ring with no interior
            return out
        first_ind, last_ind, direction = self._contained_segment_data(grid_x, grid_y)
        if first_ind is None:
            return out  # it missed the whole bounding box
        # NB: removed a redundant repeated call to _contained_segment_data here -
        # it is a pure recomputation of the values obtained above
        x_inds = numpy.arange(grid_x.size)
        y_inds = numpy.arange(grid_y.size)
        for index in range(first_ind, last_ind):
            if direction == 'x':
                seg = self._segmentation['x'][index]
                start_x = x_inds[grid_x >= seg['min']].min()
                end_x = x_inds[grid_x <= seg['max']].max() + 1
                start_y = y_inds[grid_y >= seg['min_value']].min() if grid_y[-1] >= seg['min_value'] else None
                end_y = y_inds[grid_y <= seg['max_value']].max() + 1 if start_y is not None else None
            else:
                seg = self._segmentation['y'][index]
                start_x = x_inds[grid_x >= seg['min_value']].min() if grid_x[-1] >= seg['min_value'] else None
                end_x = x_inds[grid_x <= seg['max_value']].max() + 1 if start_x is not None else None
                start_y = y_inds[grid_y >= seg['min']].min()
                end_y = y_inds[grid_y <= seg['max']].max() + 1
            if start_x is not None and end_x is not None and start_y is not None and end_y is not None:
                y_temp, x_temp = numpy.meshgrid(grid_y[start_y:end_y], grid_x[start_x:end_x], indexing='xy')
                out[start_x:end_x, start_y:end_y] = self._contained_do_segment(x_temp, y_temp, seg, direction)
        return out

    def apply_projection(self, proj_method):
        # type: (callable) -> LinearRing
        return LinearRing(coordinates=proj_method(self.coordinates))

    def to_dict(self, parent_dict=None):
        """
        Serialize the LinearRing to json.

        Note that the geojson standard requires that the serialized object has
        positive orientation. In the case of an LinearRing defined with negative
        orientation, the orientation of the object and the serialized object will
        be reversed.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """
        if self.orientation > 0:
            return super(LinearRing, self).to_dict(parent_dict=parent_dict)
        else:
            # temporarily reverse to meet the geojson orientation convention
            self.reverse_orientation()
            out = super(LinearRing, self).to_dict(parent_dict=parent_dict)
            self.reverse_orientation()
            return out
class Polygon(GeometryObject):
"""
A polygon object consisting of an outer LinearRing, and some collection of
interior LinearRings representing holes or voids.
"""
__slots__ = ('_outer_ring', '_inner_rings')
_type = 'Polygon'
def __init__(self, coordinates=None):
"""
Parameters
----------
coordinates : None|List[numpy.ndarray]|List[List[float]]|List[LinearRing]|List[LineString]|Polygon
The first element is the outer ring, any remaining will be inner rings.
"""
self._outer_ring = None # type: Union[None, LinearRing]
self._inner_rings = None # type: Union[None, List[LinearRing]]
if isinstance(coordinates, Polygon):
coordinates = coordinates.get_coordinate_list()
if coordinates is None:
return
if not isinstance(coordinates, list):
raise TypeError('coordinates must be a list of linear ring coordinate arrays.')
if len(coordinates) < 1:
return
self.set_outer_ring(coordinates[0])
for entry in coordinates[1:]:
self.add_inner_ring(entry)
def self_intersection(self):
"""
Does this Polygon self intersect?
Returns
-------
bool
"""
if self.outer_ring is None:
return False
if self.outer_ring.self_intersection():
return True
if self.inner_rings is not None:
for entry in self.inner_rings:
if entry.self_intersection():
return True
for i in range(self.outer_ring.coordinates.shape[0] - 1):
for entry in self.inner_rings:
for j in range(entry.coordinates.shape[0] - 1):
result = _line_segments_intersect(
self.outer_ring.coordinates[i, :], self.outer_ring.coordinates[i + 1, :],
entry.coordinates[j, :], entry.coordinates[j + 1, :])
if result:
return True
return False
    @property
    def outer_ring(self):
        """
        LinearRing: The outer ring.
        """
        # the polygon boundary; None for an empty polygon
        return self._outer_ring
    @property
    def inner_rings(self):
        """
        None|List[LinearRing]: The inner rings.
        """
        # holes in the polygon; None when no inner ring has been added
        return self._inner_rings
@classmethod
def from_dict(cls, geometry):
# type: (Dict) -> Polygon
if not geometry.get('type', None) == cls._type:
raise ValueError(_poorly_formed_text.format(geometry))
return cls(coordinates=geometry['coordinates'])
def get_bbox(self):
if self._outer_ring is None:
return None
return self._outer_ring.get_bbox()
def get_coordinate_list(self):
if self._outer_ring is None:
return None
out = [self._outer_ring.get_coordinate_list(), ]
if self._inner_rings is not None:
for ir in self._inner_rings:
ir_reversed = LinearRing(ir.coordinates[::-1, :])
out.append(ir_reversed.get_coordinate_list())
return out
def set_outer_ring(self, coordinates):
"""
Set the outer ring for the Polygon.
Parameters
----------
coordinates : LinearRing|numpy.ndarray|list
Returns
-------
None
"""
if coordinates is None:
self._outer_ring = None
self._inner_rings = None
return
if isinstance(coordinates, (LinearRing, LineString)):
outer_ring = LinearRing(coordinates=coordinates.coordinates)
else:
outer_ring = LinearRing(coordinates=coordinates)
self._outer_ring = outer_ring
def add_inner_ring(self, coordinates):
    """
    Append an inner ring (hole) to the polygon. Requires the outer ring to
    already be defined; a None argument is silently ignored.

    Parameters
    ----------
    coordinates : LinearRing|LineString|numpy.ndarray|list
    """

    if coordinates is None:
        return
    if self._outer_ring is None:
        raise ValueError('A Polygon cannot have an inner ring with no outer ring defined.')

    if self._inner_rings is None:
        self._inner_rings = []
    if isinstance(coordinates, (LinearRing, LineString)):
        coordinates = coordinates.coordinates
    self._inner_rings.append(LinearRing(coordinates=coordinates))
def get_perimeter(self):
    """
    Gets the perimeter of the polygon - the outer ring perimeter plus the
    perimeters of any inner rings.

    Returns
    -------
    None|float
    """

    if self._outer_ring is None:
        return None
    rings = [self._outer_ring]
    if self._inner_rings is not None:
        rings.extend(self._inner_rings)
    return sum(ring.get_perimeter() for ring in rings)
def get_area(self):
    """
    Gets the area of the polygon - the (unsigned) outer ring area minus the
    (unsigned) areas of any holes.

    Returns
    -------
    None|float
    """

    if self._outer_ring is None:
        return None
    total = abs(self._outer_ring.get_area())  # positive contribution
    for hole in (self._inner_rings or []):
        total -= abs(hole.get_area())  # holes subtract
    return total
def get_centroid(self):
    """
    Gets the centroid of the outer ring of the polygon - note that this may not
    actually lie in the polygon interior for a non-convex polygon. This will
    result in an undefined value if the polygon is degenerate.

    Returns
    -------
    None|numpy.ndarray
    """

    outer = self._outer_ring
    if outer is None:
        return None
    return outer.get_centroid()
def contain_coordinates(self, pts_x, pts_y, block_size=None):
    """
    Determines inclusion of the given points in the interior of the polygon.
    The methodology here is based on the Jordan curve theorem approach.

    ** Warning - This method may provide erroneous results for a lat/lon polygon
    crossing the bound of discontinuity and/or surrounding a pole.**

    Note - If the points constitute an x/y grid, then the grid contained method will
    be much more performant.

    Parameters
    ----------
    pts_x : numpy.ndarray|list|tuple|float|int
    pts_y : numpy.ndarray|list|tuple|float|int
    block_size : None|int
        If provided, processing block size. The minimum value used will be
        50,000.

    Returns
    -------
    numpy.ndarray|bool
        boolean array indicating inclusion.
    """

    pts_x, pts_y = _validate_contain_arguments(pts_x, pts_y)

    if self._outer_ring is None:
        # no boundary defined - nothing can be contained
        return numpy.zeros(pts_x.shape, dtype='bool')

    o_shape = pts_x.shape  # remember the original (possibly scalar) shape

    # contained = inside the outer ring...
    in_poly = self._outer_ring.contain_coordinates(pts_x, pts_y, block_size=block_size)
    if self._inner_rings is not None:
        # ...and inside none of the holes
        for ir in self._inner_rings:
            in_poly &= ~ir.contain_coordinates(pts_x, pts_y, block_size=block_size)

    if len(o_shape) == 0:
        # scalar input - presumably a scalar bool at this point; return as-is
        return in_poly
    else:
        return numpy.reshape(in_poly, o_shape)
def grid_contained(self, grid_x, grid_y):
    """
    Determines inclusion of a coordinate grid inside the polygon. The coordinate
    grid is defined by the two one-dimensional coordinate arrays `grid_x` and
    `grid_y`.

    Parameters
    ----------
    grid_x : numpy.ndarray
    grid_y : numpy.ndarray

    Returns
    -------
    numpy.ndarray
        boolean mask for point inclusion of the grid. Output is of shape
        `(grid_x.size, grid_y.size)`.
    """

    grid_x, grid_y = _validate_grid_contain_arguments(grid_x, grid_y)
    if self._outer_ring is None:
        return numpy.zeros((grid_x.size, grid_y.size), dtype='bool')

    # inside the boundary, and outside every hole
    mask = self._outer_ring.grid_contained(grid_x, grid_y)
    for hole in (self._inner_rings or []):
        mask &= ~hole.grid_contained(grid_x, grid_y)
    return mask
def add_to_kml(self, doc, parent, coord_transform):
    """
    Render this polygon into the given kml document, under the given parent.
    A polygon with no outer ring is silently skipped.
    """

    if self._outer_ring is None:
        return
    outer_str = _get_kml_coordinate_string(self._outer_ring.coordinates, coord_transform)
    inner_strs = []
    if self._inner_rings is not None:
        for ring in self._inner_rings:
            inner_strs.append(_get_kml_coordinate_string(ring.coordinates, coord_transform))
    doc.add_polygon(outer_str, inCoords=inner_strs, par=parent)
def apply_projection(self, proj_method):
    # type: (callable) -> Polygon
    """
    Apply the given projection method to every ring, returning a new Polygon.

    Parameters
    ----------
    proj_method : callable

    Returns
    -------
    Polygon
    """

    if self._outer_ring is None:
        # previously this raised AttributeError; every sibling method guards
        # the undefined case, so an empty polygon now maps to an empty polygon
        return Polygon()
    coords = [self._outer_ring.apply_projection(proj_method), ]
    if self._inner_rings is not None:
        coords.extend(lr.apply_projection(proj_method) for lr in self._inner_rings)
    return Polygon(coordinates=coords)
def get_minimum_distance(self, point):
    """
    Gets the minimum distance from the point to any ring of this polygon,
    returning infinity when no geometry is defined.
    """

    if self._outer_ring is None:
        return float('inf')
    best = self._outer_ring.get_minimum_distance(point)
    if self._inner_rings:
        hole_best = min(ring.get_minimum_distance(point) for ring in self._inner_rings)
        best = min(best, hole_best)
    return best
class MultiPolygon(GeometryObject):
    """
    A collection of polygon objects.
    """

    __slots__ = ('_polygons', )
    _type = 'MultiPolygon'
    _is_collection = True

    def __init__(self, coordinates=None):
        """
        Parameters
        ----------
        coordinates : None|List[List[List[float]]]|List[Polygon]|MultiPolygon
        """

        self._polygons = None
        # copying from another MultiPolygon - reduce to the raw coordinate list
        if isinstance(coordinates, MultiPolygon):
            coordinates = coordinates.get_coordinate_list()
        if coordinates is not None:
            self.polygons = coordinates

    @property
    def collection(self):
        # generic collection access for collection-type geometry objects
        return self.polygons

    @property
    def polygons(self):
        # type: () -> List[Polygon]
        """
        List[Polygon]: The polygon collection.
        """

        return self._polygons

    @polygons.setter
    def polygons(self, polygons):
        if polygons is None:
            self._polygons = None
            return
        if not isinstance(polygons, list):
            raise TypeError(
                'MultiPolygon requires the polygons is None or a list of Polygons. '
                'Got type {}'.format(type(polygons)))
        polys = []
        for entry in polygons:
            if isinstance(entry, Polygon):
                polys.append(entry)
            else:
                # assume a raw coordinate list and construct the Polygon from it
                polys.append(Polygon(coordinates=entry))
        self._polygons = polys

    def get_bbox(self):
        """
        Gets the overall bounding box - element-wise minima followed by
        element-wise maxima across the constituent polygon bounding boxes.

        Returns
        -------
        None|List[float]
        """

        if self._polygons is None:
            return None

        mins = []
        maxs = []
        for polygon in self.polygons:
            # each bbox is [min_0, ..., min_{k-1}, max_0, ..., max_{k-1}]
            t_bbox = polygon.get_bbox()
            num_mins = len(t_bbox)//2
            for i, entry in enumerate(t_bbox):
                if (i < num_mins):
                    # accumulate the element-wise minimum
                    if len(mins) < num_mins:
                        mins.append(entry)
                    elif entry < mins[i]:
                        mins[i] = entry
                else:
                    # accumulate the element-wise maximum
                    if len(maxs) < num_mins:
                        maxs.append(entry)
                    elif entry > maxs[i-num_mins]:
                        maxs[i-num_mins] = entry
        mins.extend(maxs)
        return mins

    @classmethod
    def from_dict(cls, geometry):
        # type: (Dict) -> MultiPolygon
        """
        Construct a MultiPolygon from a geojson-style dictionary.
        """

        if not geometry.get('type', None) == cls._type:
            raise ValueError(_poorly_formed_text.format(geometry))
        return cls(coordinates=geometry['coordinates'])

    def get_coordinate_list(self):
        if self._polygons is None:
            return None
        return [polygon.get_coordinate_list() for polygon in self._polygons]

    def get_perimeter(self):
        """
        Gets the total perimeter - the sum over the constituent polygons.

        Returns
        -------
        None|float
        """

        if self._polygons is None:
            return None
        return sum(entry.get_perimeter() for entry in self._polygons)

    def get_area(self):
        """
        Gets the total area - the sum over the constituent polygons.

        Returns
        -------
        None|float
        """

        if self._polygons is None:
            return None
        return sum(entry.get_area() for entry in self._polygons)

    def contain_coordinates(self, pts_x, pts_y, block_size=None):
        """
        Determines inclusion of the given points in the interior of any of
        the constituent polygons. The methodology here is based on the Jordan
        curve theorem approach.

        ** Warning - This method may provide erroneous results for a lat/lon polygon
        crossing the bound of discontinuity and/or surrounding a pole.**

        Note - If the points constitute an x/y grid, then the grid contained method will
        be much more performant.

        Parameters
        ----------
        pts_x : numpy.ndarray|list|tuple|float|int
        pts_y : numpy.ndarray|list|tuple|float|int
        block_size : None|int
            If provided, processing block size. The minimum value used will be
            50,000.

        Returns
        -------
        numpy.ndarray|bool
            boolean array indicating inclusion.
        """

        pts_x, pts_y = _validate_contain_arguments(pts_x, pts_y)
        if self._polygons is None or len(self._polygons) == 0:
            return numpy.zeros(pts_x.shape, dtype='bool')

        # union of inclusion across the constituent polygons
        in_poly = self._polygons[0].contain_coordinates(pts_x, pts_y, block_size=block_size)
        for entry in self._polygons[1:]:
            in_poly |= entry.contain_coordinates(pts_x, pts_y, block_size=block_size)
        return in_poly

    def grid_contained(self, grid_x, grid_y):
        """
        Determines inclusion of a coordinate grid inside any of the constituent
        polygons. The coordinate grid is defined by the two one-dimensional
        coordinate arrays `grid_x` and `grid_y`.

        Parameters
        ----------
        grid_x : numpy.ndarray
        grid_y : numpy.ndarray

        Returns
        -------
        numpy.ndarray
            boolean mask for point inclusion of the grid. Output is of shape
            `(grid_x.size, grid_y.size)`.
        """

        grid_x, grid_y = _validate_grid_contain_arguments(grid_x, grid_y)
        if self._polygons is None or len(self._polygons) == 0:
            return numpy.zeros((grid_x.size, grid_y.size), dtype='bool')

        # union of inclusion across the constituent polygons
        in_poly = self._polygons[0].grid_contained(grid_x, grid_y)
        for entry in self._polygons[1:]:
            in_poly |= entry.grid_contained(grid_x, grid_y)
        return in_poly

    def add_to_kml(self, doc, parent, coord_transform):
        """
        Render the collection as a kml MultiGeometry under the given parent.
        """

        if self._polygons is None:
            return
        multigeometry = doc.add_multi_geometry(parent)
        for geometry in self._polygons:
            if geometry is not None:
                geometry.add_to_kml(doc, multigeometry, coord_transform)

    def apply_projection(self, proj_method):
        # type: (callable) -> MultiPolygon
        """
        Apply the projection method to every constituent polygon, returning a
        new MultiPolygon.
        """

        return MultiPolygon(coordinates=[poly.apply_projection(proj_method) for poly in self.polygons])

    def get_minimum_distance(self, point):
        """
        Gets the minimum distance from the point to any constituent polygon,
        returning infinity for an undefined collection.
        """

        if self._polygons is None:
            return float('inf')
        return min(entry.get_minimum_distance(point) for entry in self.polygons)

    @classmethod
    def assemble_from_collection(cls, *args):
        """
        Assemble a multipolygon collection from input constituents.

        Parameters
        ----------
        args
            A list of input Polygon and MultiPolygon objects.

        Returns
        -------
        MultiPolygon

        Raises
        ------
        ValueError
            If any input is not polygon-like (or a collection thereof).
        """

        def handle_arg(arg_in):
            # flatten each input into the shared polygons list, recursing
            # through any GeometryCollection entries
            if isinstance(arg_in, LinearRing):
                polygons.append(Polygon([arg_in, ]))
            elif isinstance(arg_in, Polygon):
                polygons.append(arg_in)
            elif isinstance(arg_in, MultiPolygon):
                polygons.extend(arg_in.polygons)
            elif isinstance(arg_in, GeometryCollection):
                for entry in arg_in.geometries:
                    handle_arg(entry)
            else:
                raise ValueError(_disallowed_text.format(type(arg_in)))

        if len(args) == 0:
            return cls()

        polygons = []
        for arg in args:
            handle_arg(arg)
        return cls(polygons)
def basic_assemble_from_collection(*args):
    """
    Assemble the most suitable (flat) collective type from the input collection.

    The candidate collection types are tried from most specific to least
    specific, falling back to a generic GeometryCollection when no homogeneous
    collection applies.

    Parameters
    ----------
    args
        The input geometry objects.

    Returns
    -------
    GeometryCollection|MultiPoint|MultiLineString|MultiPolygon
    """

    for assembler in (MultiPoint, MultiLineString, MultiPolygon):
        try:
            return assembler.assemble_from_collection(*args)
        except ValueError:
            continue
    return GeometryCollection.assemble_from_collection(*args)
| 78,840 | 30.423276 | 119 | py |
sarpy | sarpy-master/sarpy/geometry/latlon.py | """
Module for converting between various latitude/longitude representations.
"""
import re
import numpy
__classification__ = "UNCLASSIFIED"
__author__ = "Wade Schwartzkopf"
def string(value, latlon, num_units=3, precision=None, delimiter='',
           include_symbols=True, signed=False, padded=True):
    """
    Convert latitude/longitude numeric values to customizable string format.

    Supports ISO 6709:2008 formatted geographic coordinates:

    * Annex D (human interface)
        delimiter = ''; include_symbols = true; padded = true; signed = false
    * Annex H (string representation)
        delimiter = ''; include_symbols = false; padded = true; signed = true

    Parameters
    ----------
    value : float|numpy.ndarray|list|tuple
        Value of latitude or longitude in decimal degrees or dms vector.
    latlon : str
        One of {'lat', 'lon'}, required for formatting string.
    num_units : int
        1 - decimal degrees; 2 - degrees/minutes; 3 - degrees/minutes/seconds.
        Default is 3.
    delimiter : str|list|tuple
        Separators between degrees/minutes/seconds/hemisphere. Default is '' (empty).
    include_symbols : bool
        Whether to include degree, minute, second symbols. Default is true.
    signed : bool
        Whether to use +/- or N/S/E/W to represent hemisphere.
        Default is false (N/S/E/W).
    precision : int
        Number of decimal points shown in finest unit. Default is 5 if
        num_units==1, otherwise 0.
    padded : bool
        Whether to use zeros to pad out to consistent string length (3 digits for
        longitude degrees, 2 digits for all other elements). Default is true.

    Returns
    -------
    str
        The formatted coordinate string.

    Raises
    ------
    ValueError
        If `latlon` is not one of 'lat' or 'lon'.
    """

    if isinstance(value, (numpy.ndarray, list, tuple)):
        value = num(value)
    elif not isinstance(value, float):
        value = float(value)
    # value should now be in decimal degrees

    # Precision. Default is dependent on other input arguments.
    if precision is None:
        precision = 5 if num_units == 1 else 0
    # Symbols
    if include_symbols:
        latlon_symbols = ('\xB0', "'", '"')
    else:
        latlon_symbols = ('', '', '')
    # Delimiters
    try:
        if len(delimiter) != 3:
            delimiter = [delimiter]*num_units
    except TypeError:  # no len(), so delimiter must be a scalar (was a bare except)
        delimiter = [delimiter]*num_units
    if signed:
        delimiter[num_units-1] = ''  # No separator needed for hemisphere

    # Differences between latitude and longitude.
    # NB: zero is treated as the positive hemisphere (N/E or '+'), consistent
    # with ISO 6709 - previously an exact 0 was rendered as S/W.
    if latlon == 'lat':
        if value >= 0:
            hemisphere = 'N'
            latlon_sign = '+'
        else:
            hemisphere = 'S'
            latlon_sign = '-'
        degrees_digits = 2
    elif latlon == 'lon':
        if value > 180:
            value = value - 360
        if value >= 0:
            hemisphere = 'E'
            latlon_sign = '+'
        else:
            hemisphere = 'W'
            latlon_sign = '-'
        degrees_digits = 3
    else:
        # previously any other value fell through to a confusing NameError
        raise ValueError("latlon must be one of 'lat' or 'lon', got {!r}".format(latlon))

    # Compute degrees/minutes/seconds
    new_value = abs(value)
    value = [None]*num_units
    for i in range(num_units):
        fraction = new_value % 1.
        value[i] = int(new_value)
        new_value = fraction*60
    value[-1] = value[-1] + fraction
    # carry when rounding would display the finest unit as 60
    if num_units > 1 and round(value[-1], precision) == 60:  # Seconds of 60 is invalid
        value[-1] = 0
        value[-2] = value[-2] + 1
        if num_units == 3 and value[-2] == 60:  # minutes may then carry into degrees
            value[-2] = 0
            value[-3] = value[-3] + 1

    # Build string
    latlon_string = ''
    for i in range(num_units):
        if padded:
            # 2 digits for everything except longitude degrees (3)
            int_digits = degrees_digits if i == 0 else 2
        else:
            int_digits = 1
        precision_digits = precision if (i + 1) == num_units else 0
        if precision_digits > 0:
            int_digits = int_digits + 1  # Account for the decimal point
        latlon_string = '%s%0*.*f%s%s' % \
            (latlon_string, int_digits + precision_digits, precision_digits,
             abs(value[i]), latlon_symbols[i], delimiter[i])
    if signed:
        latlon_string = latlon_sign + latlon_string
    else:
        latlon_string = latlon_string + hemisphere
    return latlon_string
def dms(degrees):
    """
    Calculate degrees, minutes, seconds representation from decimal degrees.

    The degrees entry carries the sign of the input; minutes and seconds are
    always non-negative.

    Parameters
    ----------
    degrees : float

    Returns
    -------
    (int, int, float)
    """

    magnitude = abs(degrees)
    whole_degrees = int(magnitude)
    # fractional degrees expressed in minutes
    minutes_total = (magnitude - whole_degrees)*60
    whole_minutes = float(int(minutes_total))
    # fractional minutes expressed in (decimal) seconds
    seconds = (minutes_total - whole_minutes)*60
    if degrees < 0:
        whole_degrees = -whole_degrees
    return whole_degrees, whole_minutes, seconds
def num(latlon_input):
    """
    Convert a variety of lat/long formats into decimal degree value.

    This should handle any string compliant with the ISO 6709:2008 standard
    or any of a number of variants for describing lat/long coordinates.
    Also handles degree/minutes/seconds passed in as a tuple/list/array.
    Unparseable input yields NaN.

    Parameters
    ----------
    latlon_input : numpy.ndarray|list|tuple|str

    Returns
    -------
    float
    """

    # Vector format degrees/minutes/seconds
    if isinstance(latlon_input, (numpy.ndarray, list, tuple)) and len(latlon_input) == 3:
        deg, minutes, secs = latlon_input
        return float(deg) + float(minutes)/60. + float(secs)/3600.

    if not isinstance(latlon_input, str):
        raise ValueError('Expected a (degree, minutes, seconds) tuple of string. '
                         'Got type {}'.format(type(latlon_input)))

    # String input - decimal degrees, or degree/minutes/seconds where any
    # non-numeric character acts as a delimiter
    pieces = [piece for piece in re.split(r'[^.\d]', latlon_input) if len(piece.strip()) > 0]
    values = [float(piece) for piece in pieces]
    # evaluate values[0] + values[1]/60 + values[2]/3600 via polynomial at 1/60
    decimal_degrees = numpy.polynomial.polynomial.polyval(1/60., numpy.abs(values))
    # negative exactly when one of {hemisphere letter, minus sign} indicates it
    if ('W' in latlon_input or 'S' in latlon_input) != ('-' in latlon_input):
        decimal_degrees = -decimal_degrees

    # Undelimited degree/minutes/seconds, i.e. DDD, DDDMM or DDDMMSS
    if len(values) == 1:
        integer_digits = len(pieces[0].split('.')[0])
        for _ in range(min(3, integer_digits//2 - 1)):
            decimal_degrees = (numpy.fix(decimal_degrees/100) +
                               numpy.fmod(decimal_degrees, 100)/60.)

    # sanity check - unparseable inputs are returned as NaN
    if (len(values) < 1 or len(values) > 3 or
            decimal_degrees < -180 or decimal_degrees > 360 or
            sum(c.isalpha() for c in latlon_input) > 1):
        decimal_degrees = float('nan')
    return float(decimal_degrees)
| 7,323 | 34.726829 | 111 | py |
sarpy | sarpy-master/sarpy/geometry/__init__.py |
__classification__ = "UNCLASSIFIED"
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/geometry/geocoords.py | """
Provides coordinate transforms for WGS-84 and ECF coordinate systems
"""
import numpy
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Wade Schwartzkopf")
#####
# WGS-84 parameters and related derived parameters
_A = 6378137.0 # Semi-major radius (m)
_F = 1/298.257223563 # Flattening
_GM = 3986004.418E8 # Earth's gravitational constant (including atmosphere)
_W = 7292115.1467E-11 # Angular velocity (radians/second), not including precession
_B = _A - _F*_A # 6356752.3142, Semi-minor radius (m)
_A2 = _A*_A
_B2 = _B*_B
_E2 = (_A2-_B2)/_A2 # 6.69437999014E-3, First eccentricity squared
_E4 = _E2*_E2
_OME2 = 1.0 - _E2
_EB2 = (_A2 - _B2)/_B2
def _validate(arr):
if not isinstance(arr, numpy.ndarray):
arr = numpy.array(arr, dtype='float64')
if arr.shape[-1] != 3:
raise ValueError(
'The input argument should represent geographical coordinates, so the '
'final dimension should have size 3. Got shape {}.'.format(arr.shape))
orig_shape = arr.shape
arr = numpy.reshape(arr, (-1, 3)) # this is just a view
return arr, orig_shape
def ecf_to_geodetic(ecf, ordering='latlong'):
    """
    Converts ECF (Earth Centered Fixed) coordinates to WGS-84 coordinates.

    Parameters
    ----------
    ecf : numpy.ndarray|list|tuple
    ordering : str
        If 'longlat', then the return will be `[longitude, latitude, hae]`.
        Otherwise, the return will be `[latitude, longitude, hae]`.

    Returns
    -------
    numpy.ndarray
        The WGS-84 coordinates, of the same shape as `ecf`. Points for which
        the solution is invalid are left as NaN.
    """

    ecf, orig_shape = _validate(ecf)
    x = ecf[:, 0]
    y = ecf[:, 1]
    z = ecf[:, 2]

    llh = numpy.full(ecf.shape, numpy.nan, dtype=numpy.float64)

    r = numpy.sqrt((x * x) + (y * y))  # distance from the polar (z) axis

    # Check for invalid solution
    valid = ((_A*r)*(_A*r) + (_B*z)*(_B*z) > (_A2 - _B2)*(_A2 - _B2))

    # calculate intermediates
    # NOTE(review): this appears to be a closed-form (Heikkinen/Zhu-style)
    # ECF -> geodetic solution - confirm against the originating reference
    F = 54.0*_B2*z*z  # not the WGS 84 flattening parameter
    G = r*r + _OME2*z*z - _E2*(_A2 - _B2)
    C = _E4*F*r*r/(G*G*G)
    S = (1.0 + C + numpy.sqrt(C*C + 2*C))**(1./3)
    P = F/(3.0*(G*(S + 1.0/S + 1.0))**2)
    Q = numpy.sqrt(1.0 + 2.0*_E4*P)
    R0 = -P*_E2*r/(1.0 + Q) + numpy.sqrt(numpy.abs(0.5*_A2*(1.0 + 1/Q) - P*_OME2*z*z/(Q*(1.0 + Q)) - 0.5*P*r*r))
    T = r - _E2*R0
    U = numpy.sqrt(T*T + z*z)
    V = numpy.sqrt(T*T + _OME2*z*z)
    z0 = _B2*z/(_A*V)

    # account for ordering
    if ordering.lower() == 'longlat':
        inds = [0, 1, 2]
    else:
        inds = [1, 0, 2]

    # calculate longitude
    llh[valid, inds[0]] = numpy.rad2deg(numpy.arctan2(y[valid], x[valid]))
    # calculate latitude
    llh[valid, inds[1]] = numpy.rad2deg(numpy.arctan2(z[valid] + _EB2*z0[valid], r[valid]))
    # calculate altitude (height above the ellipsoid)
    llh[valid, inds[2]] = U[valid]*(1.0 - _B2/(_A*V[valid]))
    return numpy.reshape(llh, orig_shape)
def geodetic_to_ecf(llh, ordering='latlong'):
    """
    Converts WGS-84 coordinates to ECF (Earth Centered Fixed).

    Parameters
    ----------
    llh : numpy.ndarray|list|tuple
    ordering : str
        If 'longlat', then the input is `[longitude, latitude, hae]`.
        Otherwise, the input is `[latitude, longitude, hae]`.

    Returns
    -------
    numpy.ndarray
        The ECF coordinates, of the same shape as `llh`.
    """

    llh, orig_shape = _validate(llh)
    # account for ordering
    if ordering.lower() == 'longlat':
        inds = [0, 1, 2]
    else:
        inds = [1, 0, 2]
    lon = llh[:, inds[0]]
    lat = llh[:, inds[1]]
    alt = llh[:, inds[2]]

    out = numpy.full(llh.shape, numpy.nan, dtype=numpy.float64)

    # hoist the repeated trig evaluations (previously recomputed per component)
    sin_lat = numpy.sin(numpy.deg2rad(lat))
    cos_lat = numpy.cos(numpy.deg2rad(lat))
    sin_lon = numpy.sin(numpy.deg2rad(lon))
    cos_lon = numpy.cos(numpy.deg2rad(lon))

    # calculate distance to surface of ellipsoid
    r = _A/numpy.sqrt(1.0 - _E2*sin_lat*sin_lat)
    # calculate coordinates
    out[:, 0] = (r + alt)*cos_lat*cos_lon
    out[:, 1] = (r + alt)*cos_lat*sin_lon
    out[:, 2] = (r + alt - _E2*r)*sin_lat
    return numpy.reshape(out, orig_shape)
def wgs_84_norm(ecf):
    """
    Calculates the normal vector to the WGS_84 ellipsoid at the given ECF
    coordinates.

    Parameters
    ----------
    ecf : numpy.ndarray|list|tuple

    Returns
    -------
    numpy.ndarray
        The unit normal vector(s), of the same shape as `ecf`.
    """

    ecf, orig_shape = _validate(ecf)
    # gradient direction of the ellipsoid implicit equation
    scaled = ecf/numpy.array([_A2, _A2, _B2], dtype=numpy.float64)
    lengths = numpy.linalg.norm(scaled, axis=1)
    unit = scaled/lengths[:, numpy.newaxis]
    return numpy.reshape(unit, orig_shape)
def _ecf_to_ned_matrix(orp_coord):
    """
    Get the rotation matrix for converting ECF to NED coordinate system
    conversion.

    Note: The array orientation convention indicates array multiplication on the
    RIGHT, so this is the transpose of the transform matrix for left multiplication.

    Parameters
    ----------
    orp_coord : numpy.ndarray
        The origin reference point. This is assumed given in ECF coordinates.

    Returns
    -------
    numpy.ndarray
        The 3x3 rotation matrix.

    Raises
    ------
    ValueError
        If `orp_coord` is not a one-dimensional array of length 3.
    """

    if not isinstance(orp_coord, numpy.ndarray) or orp_coord.ndim != 1 or orp_coord.size != 3:
        raise ValueError('orp_coord must be a one-dimensional array of length 3.')

    llh = ecf_to_geodetic(orp_coord)  # default ordering, so llh = [lat, lon, hae]
    # rotation angle about the y-axis: -(90 + latitude) degrees
    angle2 = numpy.deg2rad(-90 - llh[0])
    # rotation angle about the z-axis: the longitude
    angle1 = numpy.deg2rad(llh[1])
    matrix1 = numpy.array([[numpy.cos(angle1), -numpy.sin(angle1), 0],
                           [numpy.sin(angle1), numpy.cos(angle1), 0],
                           [0, 0, 1]], dtype='float64')
    matrix2 = numpy.array([[numpy.cos(angle2), 0, numpy.sin(angle2)],
                           [0, 1, 0],
                           [-numpy.sin(angle2), 0, numpy.cos(angle2)]], dtype='float64')
    # composed for right multiplication, i.e. coords.dot(matrix)
    return matrix1.dot(matrix2)
def ecf_to_ned(ecf_coords, orp_coord, absolute_coords=True):
    """
    Convert from ECF to North-East-Down (NED) coordinates.

    Parameters
    ----------
    ecf_coords : numpy.ndarray
    orp_coord : numpy.ndarray
        The Origin Reference Point in ECF coordinates.
    absolute_coords : bool
        Are these absolute (i.e. position) coordinates, as opposed to relative
        quantities like velocity, acceleration, or unit vectors?

    Returns
    -------
    numpy.ndarray
    """

    if not isinstance(orp_coord, numpy.ndarray):
        orp_coord = numpy.array(orp_coord, dtype='float64')
    rotation = _ecf_to_ned_matrix(orp_coord)
    # NB: orp_coord is guaranteed to be shape (3, )
    ecf_coords, original_shape = _validate(ecf_coords)
    # positions are translated relative to the origin; relative quantities are not
    shifted = (ecf_coords - orp_coord) if absolute_coords else ecf_coords
    return numpy.reshape(shifted.dot(rotation), original_shape)
def ned_to_ecf(ned_coords, orp_coord, absolute_coords=True):
    """
    Convert from North-East-Down (NED) to ECF coordinates.

    Parameters
    ----------
    ned_coords : numpy.ndarray
        The NED coordinates.
    orp_coord : numpy.ndarray
        The Origin Reference Point in ECF coordinates.
    absolute_coords : bool
        Are these absolute (i.e. position) coordinates, as opposed to relative
        quantities like velocity, acceleration, or unit vectors?

    Returns
    -------
    numpy.ndarray
    """

    if not isinstance(orp_coord, numpy.ndarray):
        orp_coord = numpy.array(orp_coord, dtype='float64')
    # the rotation matrix is orthogonal, so transpose = inverse
    rotation = _ecf_to_ned_matrix(orp_coord).transpose()
    # NB: orp_coord is guaranteed to be shape (3, )
    ned_coords, original_shape = _validate(ned_coords)
    result = ned_coords.dot(rotation)
    if absolute_coords:
        result = result + orp_coord
    return numpy.reshape(result, original_shape)
def _ecf_to_enu_matrix(orp_coord):
    """
    Get the rotation matrix for converting from ECF to ENU.

    Note: The array orientation convention indicates array multiplication on the
    RIGHT, so this is the transpose of the transform matrix for left multiplication.

    Parameters
    ----------
    orp_coord : numpy.ndarray
        The origin reference point. This is assumed given in ECF coordinates.

    Returns
    -------
    numpy.ndarray
    """

    # NED -> ENU swaps the first two axes and flips the third
    swap_and_flip = numpy.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]], dtype='float64')
    return _ecf_to_ned_matrix(orp_coord).dot(swap_and_flip)
def ecf_to_enu(ecf_coords, orp_coord, absolute_coords=True):
    """
    Convert from ECF to East-North-Up (ENU) coordinates.

    Parameters
    ----------
    ecf_coords : numpy.ndarray
    orp_coord : numpy.ndarray
        The Origin Reference Point in ECF coordinates.
    absolute_coords : bool
        Are these absolute (i.e. position) coordinates, as opposed to relative
        quantities like velocity, acceleration, or unit vectors?

    Returns
    -------
    numpy.ndarray
    """

    if not isinstance(orp_coord, numpy.ndarray):
        orp_coord = numpy.array(orp_coord, dtype='float64')
    rotation = _ecf_to_enu_matrix(orp_coord)
    # NB: orp_coord is guaranteed to be shape (3, )
    ecf_coords, original_shape = _validate(ecf_coords)
    # positions are translated relative to the origin; relative quantities are not
    shifted = (ecf_coords - orp_coord) if absolute_coords else ecf_coords
    return numpy.reshape(shifted.dot(rotation), original_shape)
def enu_to_ecf(enu_coords, orp_coord, absolute_coords=True):
    """
    Convert from East-North-Up (ENU) to ECF coordinates.

    Parameters
    ----------
    enu_coords : numpy.ndarray
        The ENU coordinates.
    orp_coord : numpy.ndarray
        The Origin Reference Point in ECF coordinates.
    absolute_coords : bool
        Are these absolute (i.e. position) coordinates, as opposed to relative
        quantities like velocity, acceleration, or unit vectors?

    Returns
    -------
    numpy.ndarray
    """

    if not isinstance(orp_coord, numpy.ndarray):
        orp_coord = numpy.array(orp_coord, dtype='float64')
    # the rotation matrix is orthogonal, so transpose = inverse
    rotation = _ecf_to_enu_matrix(orp_coord).transpose()
    # NB: orp_coord is guaranteed to be shape (3, )
    enu_coords, original_shape = _validate(enu_coords)
    result = enu_coords.dot(rotation)
    if absolute_coords:
        result = result + orp_coord
    return numpy.reshape(result, original_shape)
| 10,143 | 29.926829 | 112 | py |
sarpy | sarpy-master/sarpy/visualization/kmz_product_creation.py | """
This module provides tools for creating kmz products for a SICD type element.
.. Note::
Creation of ground overlays (i.e. image overlay) requires the optional
Pillow dependency for image manipulation.
Examples
--------
Create a kmz overview for the contents of a sicd type reader.
.. code-block:: python
import os
from sarpy.io.complex.converter import open_complex
from sarpy.visualization.kmz_product_creation import create_kmz_view
test_root = '<root directory>'
    reader = open_complex(os.path.join(test_root, '<file name>'))
create_kmz_view(reader, test_root,
file_stem='View-<something descriptive>',
pixel_limit=2048,
inc_collection_wedge=True)
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
# TODO: tidy up significantly
import logging
from typing import Union
import json
import os
import numpy
from sarpy.processing.rational_polynomial import SarpyRatPolyError
from sarpy.processing.ortho_rectify.base import FullResolutionFetcher, OrthorectificationIterator
from sarpy.processing.ortho_rectify.ortho_methods import OrthorectificationHelper, NearestNeighborMethod
from sarpy.processing.ortho_rectify.projection_helper import PGProjection, PGRatPolyProjection
from sarpy.io.kml import Document
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.utils import sicd_reader_iterator
from sarpy.geometry.geocoords import ecf_to_geodetic
from sarpy.visualization.remap import RemapFunction, NRL
try:
# noinspection PyPackageRequirements
import PIL
import PIL.Image
except ImportError:
PIL = None
logger = logging.getLogger(__name__)
def _create_sicd_styles(kmz_document):
    """
    Creates the appropriate styles for SICD usage.

    Registers paired high/low detail styles (and the style maps joining them)
    for the bounding box, valid data area, SCP, aperture position, and
    collection wedge elements.

    Parameters
    ----------
    kmz_document : Document

    Returns
    -------
    None
    """

    # NOTE(review): the label/icon/line/poly dicts below are mutated and reused
    # between add_style calls - this assumes Document.add_style consumes the
    # values immediately rather than retaining the dict references; confirm in
    # sarpy.io.kml if styles ever render unexpectedly.

    # bounding box style - maybe polygon, maybe corner points, clamped to ground
    label = {'color': 'ffc0c0c0', 'scale': '1.0'}
    icon = {'scale': '1.5', 'icon_ref': 'http://maps.google.com/mapfiles/kml/pushpin/blue-pushpin.png'}
    line = {'color': 'ccff5050', 'width': '2.0'}
    poly = {'color': '30ff5050'}
    kmz_document.add_style('bounding_high', label_style=label, icon_style=icon, line_style=line, poly_style=poly)
    label['scale'] = '0.75'
    icon['scale'] = '1.0'
    line['width'] = '1.0'
    kmz_document.add_style('bounding_low', label_style=label, icon_style=icon, line_style=line, poly_style=poly)
    kmz_document.add_style_map('bounding', 'bounding_high', 'bounding_low')

    # valid data style - basic polygon, probably clamped to ground
    line = {'color': 'cc5050ff', 'width': '2.0'}
    poly = {'color': '305050ff'}
    kmz_document.add_style('valid_high', line_style=line, poly_style=poly)
    line['width'] = '1.0'
    kmz_document.add_style('valid_low', line_style=line, poly_style=poly)
    kmz_document.add_style_map('valid', 'valid_high', 'valid_low')

    # scp - intended for basic point clamped to ground
    label = {'color': 'ff50c0c0', 'scale': '1.0'}
    icon = {'color': 'ff5050c0', 'scale': '1.5',
            'icon_ref': 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'}
    kmz_document.add_style('scp_high', label_style=label, icon_style=icon)
    label['scale'] = '0.75'
    icon['scale'] = '1.0'
    kmz_document.add_style('scp_low', label_style=label, icon_style=icon)
    kmz_document.add_style_map('scp', 'scp_high', 'scp_low')

    # arp position style - intended for gx track
    line = {'color': 'ff50ff50', 'width': '1.5'}
    label = {'color': 'ffc0c0c0', 'scale': '1.5'}
    icon = {'scale': '2.0', 'icon_ref': 'http://maps.google.com/mapfiles/kml/shapes/track.png'}
    poly = {'color': 'a050ff50'}
    kmz_document.add_style('arp_high', line_style=line, label_style=label, icon_style=icon, poly_style=poly)
    line['width'] = '1.0'
    label['scale'] = '1.0'
    icon['scale'] = '1.0'
    poly = {'color': '7050ff50'}
    kmz_document.add_style('arp_low', line_style=line, label_style=label, icon_style=icon, poly_style=poly)
    kmz_document.add_style_map('arp', 'arp_high', 'arp_low')

    # collection wedge style - intended as polygon
    line = {'color': 'ffa0a050', 'width': '1.5'}
    poly = {'color': 'a0a0a050'}
    kmz_document.add_style('collection_high', line_style=line, poly_style=poly)
    line['width'] = '1.0'
    poly = {'color': '70a0a050'}
    kmz_document.add_style('collection_low', line_style=line, poly_style=poly)
    kmz_document.add_style_map('collection', 'collection_high', 'collection_low')
def _get_sicd_name(sicd):
"""
Gets the kml-styled name for the provided SICD.
Parameters
----------
sicd : SICDType
Returns
-------
str
"""
return sicd.CollectionInfo.CoreName
def _get_sicd_description(sicd):
    """
    Gets the kml-styled description for the provided SICD - the structure
    serialized as indented json.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    str
    """

    sicd_copy = sicd.copy()
    # drop the WgtFunct arrays - they are huge and probably not interesting
    try:
        sicd_copy.Grid.Row.WgtFunct = None
        sicd_copy.Grid.Col.WgtFunct = None
    except AttributeError:
        pass
    return json.dumps(sicd_copy.to_dict(), indent=1)
def _get_orthoiterator_description(ortho_iterator):
    """
    Get an html-ish description string for the ortho_iterator details.

    Parameters
    ----------
    ortho_iterator : OrthorectificationIterator

    Returns
    -------
    str
    """

    proj = ortho_iterator.ortho_helper.proj_helper
    return (
        'ortho-rectified image for {name:s}<br>'
        'row resolution - {row:0.2f} meters<br>'
        'column resolution - {col:0.2f} meters<br>'
        'remap function - {remap:s}'.format(
            name=_get_sicd_name(ortho_iterator.sicd),
            row=proj.row_spacing,
            col=proj.col_spacing,
            remap=ortho_iterator.remap_function.name))
def _get_sicd_time_args(sicd, subdivisions=24):
# type: (SICDType, Union[int, None]) -> (dict, Union[None, numpy.ndarray])
"""
Fetch the SICD time arguments and array.
Parameters
----------
sicd : SICDType
subdivisions : int|None
Returns
-------
(dict, None|numpy.ndarray)
"""
if sicd.Timeline is None or sicd.Timeline.CollectStart is None:
return {}, None
beg_time = sicd.Timeline.CollectStart.astype('datetime64[us]')
if sicd.Timeline.CollectDuration is None:
return {'when': str(beg_time)+'Z', }, None
end_time = beg_time + int(sicd.Timeline.CollectDuration*1e6)
if not isinstance(subdivisions, int) or subdivisions < 2:
time_array = None
else:
time_array = numpy.linspace(0, sicd.Timeline.CollectDuration, subdivisions)
return {'beginTime': str(beg_time)+'Z', 'endTime': str(end_time)+'Z'}, time_array
def _write_image_corners(kmz_document, sicd, time_args, folder, write_points=True):
    """
    Add the image corner locations to the given kml folder.
    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element
    write_points : bool
        If True, write individual corner points; otherwise write a single polygon.
    Returns
    -------
    None
    """
    if sicd.GeoData is None or sicd.GeoData.ImageCorners is None:
        return
    coord_fmt = '{1:0.8f},{0:0.8f},0'
    corners = sicd.GeoData.ImageCorners.get_array(dtype='float64')
    if numpy.any(~numpy.isfinite(corners)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the corner locations array.')
    if not write_points:
        # render the corners as one polygon, skipping any nonsense vertices
        coords = ' '.join(coord_fmt.format(*entry) for entry in corners if not numpy.any(~numpy.isfinite(entry)))
        placemark = kmz_document.add_container(
            par=folder, description='image corners for {}'.format(_get_sicd_name(sicd)), styleUrl='#bounding')
        kmz_document.add_polygon(coords, par=placemark, altitudeMode='clampToGround', **time_args)
        return
    # render each corner as an individual labeled point
    for label, corner in zip(['FRFC', 'FRLC', 'LRLC', 'LRFC'], corners):
        if numpy.any(~numpy.isfinite(corner)):
            continue
        placemark = kmz_document.add_container(
            par=folder, description='{} for {}'.format(label, _get_sicd_name(sicd)),
            styleUrl='#bounding')
        kmz_document.add_point(coord_fmt.format(*corner), par=placemark, altitudeMode='clampToGround', **time_args)
def _write_valid_area(kmz_document, sicd, time_args, folder):
    """
    Add the valid data polygon to the given kml folder.
    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element
    Returns
    -------
    None
    """
    if sicd.GeoData is None or sicd.GeoData.ValidData is None:
        return
    coord_fmt = '{1:0.8f},{0:0.8f},0'
    valid_array = sicd.GeoData.ValidData.get_array(dtype='float64')
    if numpy.any(~numpy.isfinite(valid_array)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the valid array location.')
    # close the ring by repeating the first vertex
    vertices = [coord_fmt.format(*entry) for entry in valid_array]
    vertices.append(coord_fmt.format(*valid_array[0, :]))
    placemark = kmz_document.add_container(
        par=folder, description='valid data for {}'.format(_get_sicd_name(sicd)), styleUrl='#valid')
    kmz_document.add_polygon(' '.join(vertices), par=placemark, altitudeMode='clampToGround', **time_args)
def _write_scp(kmz_document, sicd, time_args, folder):
    """
    Add the scene center point (SCP) to the given kml folder.
    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element
    Returns
    -------
    None
    """
    if sicd.GeoData is None or sicd.GeoData.SCP is None:
        return
    scp_llh = sicd.GeoData.SCP.LLH.get_array()
    if numpy.any(~numpy.isfinite(scp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the scp location.')
    placemark = kmz_document.add_container(
        par=folder, description='SCP for {}'.format(_get_sicd_name(sicd)), styleUrl='#scp')
    kmz_document.add_point(
        '{1:0.8f},{0:0.8f},0'.format(*scp_llh), par=placemark,
        altitudeMode='clampToGround', **time_args)
def _write_arp_location(kmz_document, sicd, time_args, time_array, folder):
    """
    Write the aperture reference point (ARP) track to the kml folder.
    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    time_array : None|numpy.ndarray
        Sample times in seconds relative to collect start; nothing is written if None.
    folder : minidom.Element
    Returns
    -------
    None|numpy.ndarray
        The sampled ARP positions in geodetic (lat, lon, hae) coordinates, or None
        when the position could not be determined.
    """
    if time_array is None:
        return None
    if sicd.Position is not None and sicd.Position.ARPPoly is not None:
        # preferred: evaluate the position polynomial at the sample times
        arp_pos = sicd.Position.ARPPoly(time_array)
    elif sicd.SCPCOA.ARPPos is not None and sicd.SCPCOA.ARPVel is not None:
        # fall back to linear extrapolation from the center-of-aperture state
        arp_pos = sicd.SCPCOA.ARPPos.get_array() + numpy.outer(time_array, sicd.SCPCOA.ARPVel.get_array())
    else:
        return None
    arp_llh = ecf_to_geodetic(arp_pos)
    if numpy.any(~numpy.isfinite(arp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the aperture location.')
    # kml wants "lon,lat,alt" ordering
    coords = ['{1:0.8f},{0:0.8f},{2:0.2f}'.format(*el) for el in arp_llh]
    whens = [str(sicd.Timeline.CollectStart.astype('datetime64[us]') + int(el*1e6)) + 'Z' for el in time_array]
    placemark = kmz_document.add_container(par=folder, description='aperture position for {}'.format(_get_sicd_name(sicd)), styleUrl='#arp', **time_args)
    kmz_document.add_gx_track(coords, whens, par=placemark, extrude=True, tesselate=True, altitudeMode='absolute')
    return arp_llh
def _write_collection_wedge(kmz_document, sicd, time_args, arp_llh, time_array, folder):
    """
    Writes the collection wedge.
    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    arp_llh : None|numpy.ndarray
        The aperture track in geodetic coordinates, as returned by `_write_arp_location`.
    time_array : None|numpy.ndarray
    folder : minidom.Element
    Returns
    -------
    None
    """
    if time_array is None or arp_llh is None:
        return
    if sicd.Position is not None and sicd.Position.GRPPoly is not None:
        # sample the ground reference point at each collect time
        grp = sicd.Position.GRPPoly(time_array)
    elif sicd.GeoData is not None and sicd.GeoData.SCP is not None:
        # fall back to the static scene center point
        grp = numpy.reshape(sicd.GeoData.SCP.ECF.get_array(), (1, 3))
    else:
        return
    frm = '{1:0.8f},{0:0.8f},{2:0.2f}'
    grp_llh = ecf_to_geodetic(grp)
    if numpy.any(~numpy.isfinite(grp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the scp/ground range locations.')
    # trace along the aperture track, back along the ground track, and close the loop
    coord_array = [frm.format(*el) for el in arp_llh]
    if len(grp_llh) > 1:
        coord_array.extend(frm.format(*el) for el in grp_llh[::-1, :])
    else:
        coord_array.append(frm.format(*grp_llh[0, :]))
    coord_array.append(frm.format(*arp_llh[0, :]))
    coords = ' '.join(coord_array)
    placemark = kmz_document.add_container(par=folder, description='collection wedge for {}'.format(_get_sicd_name(sicd)), styleUrl='#collection', **time_args)
    kmz_document.add_polygon(coords, par=placemark, extrude=False, tesselate=False, altitudeMode='absolute')
def _write_sicd_overlay(ortho_iterator, kmz_document, folder):
    """
    Write the orthorectified SICD ground overlay.
    Parameters
    ----------
    ortho_iterator : OrthorectificationIterator
    kmz_document : Document
    folder : minidom.Element
    Returns
    -------
    None
    """
    def reorder_corners(llh_in):
        # reverse the corner ordering for the kml lat/lon quad convention
        return llh_in[::-1, :]
    if PIL is None:
        logger.error(
            'This functionality for writing kmz ground overlays requires the optional Pillow dependency.')
        return
    time_args, _ = _get_sicd_time_args(ortho_iterator.sicd, subdivisions=None)
    # create the output workspace
    if ortho_iterator.remap_function.bit_depth != 8:
        raise ValueError('The bit depth for the remap function must be 8, for now.')
    image_data = numpy.zeros(ortho_iterator.ortho_data_size, dtype=ortho_iterator.remap_function.output_dtype)
    # populate by iterating over the remapped/orthorectified chunks
    for data, start_indices in ortho_iterator:
        image_data[start_indices[0]:start_indices[0]+data.shape[0],
                   start_indices[1]:start_indices[1]+data.shape[1]] = data
    # create regionated overlay
    # convert image array to PIL image.
    img = PIL.Image.fromarray(image_data)  # this is to counteract the PIL treatment
    lat_lon_quad = reorder_corners(ortho_iterator.get_llh_image_corners())
    kmz_document.add_regionated_ground_overlay(
        img, folder, lat_lon_quad=lat_lon_quad[:, :2], img_format='JPEG',
        name='image overlay for {}'.format(_get_sicd_name(ortho_iterator.sicd)),
        description=_get_orthoiterator_description(ortho_iterator))
def prepare_kmz_file(file_name, **args):
    """
    Open a kmz Document with the sicd styles pre-registered, ready for export.
    Parameters
    ----------
    file_name : str
    args
        Passed through to the Document constructor.
    Returns
    -------
    Document
    """
    kmz_document = Document(file_name=file_name, **args)
    _create_sicd_styles(kmz_document)
    return kmz_document
def add_sicd_geometry_elements(sicd, kmz_document, folder,
                               inc_image_corners=True, inc_valid_data=False,
                               inc_scp=False, inc_collection_wedge=True):
    """
    Add the requested SICD geometry features to the given kml folder.
    Parameters
    ----------
    sicd : SICDType
    kmz_document : Document
    folder : minidom.Element
    inc_image_corners : bool
        Include the image corners, if possible?
    inc_valid_data : bool
        Include the valid image area, if possible?
    inc_scp : bool
        Include the scp?
    inc_collection_wedge : bool
        Include the aperture location and collection wedge?
    Returns
    -------
    None
    """
    # establish the time arguments and sample times for this collect
    time_args, time_array = _get_sicd_time_args(sicd)
    if inc_image_corners:
        _write_image_corners(kmz_document, sicd, time_args, folder)
    if inc_valid_data:
        _write_valid_area(kmz_document, sicd, time_args, folder)
    if inc_scp:
        _write_scp(kmz_document, sicd, time_args, folder)
    if inc_collection_wedge:
        # the wedge requires the aperture track, so write both together
        arp_llh = _write_arp_location(kmz_document, sicd, time_args, time_array, folder)
        _write_collection_wedge(kmz_document, sicd, time_args, arp_llh, time_array, folder)
def add_sicd_from_ortho_helper(kmz_document, ortho_helper,
                               inc_image_corners=False, inc_valid_data=False,
                               inc_scp=False, inc_collection_wedge=False,
                               block_size=10, remap_function=None):
    """
    Adds for a SICD to the provided open kmz from an ortho-rectification helper.
    Parameters
    ----------
    kmz_document : Document
    ortho_helper : OrthorectificationHelper
    inc_image_corners : bool
        Include the image corners, if possible?
    inc_valid_data : bool
        Include the valid image area, if possible?
    inc_scp : bool
        Include the scp?
    inc_collection_wedge : bool
        Include the aperture location and collection wedge?
    block_size : None|int|float
        The block size for the iterator
    remap_function : None|RemapFunction
        The remap function to apply, or a suitable default will be chosen.
    Raises
    ------
    TypeError
        If `ortho_helper` or `kmz_document` is not of the expected type.
    """
    if not isinstance(ortho_helper, OrthorectificationHelper):
        raise TypeError(
            'ortho_helper must be an OrthorectificationHelper instance, got '
            'type {}'.format(type(ortho_helper)))
    if not isinstance(kmz_document, Document):
        raise TypeError(
            'kmz_document must be an sarpy.io.kml.Document instance, got '
            'type {}'.format(type(kmz_document)))
    # create a folder for these sicd details
    sicd = ortho_helper.sicd
    folder = kmz_document.add_container(
        the_type='Folder', name=_get_sicd_name(sicd), description=_get_sicd_description(sicd))
    # write the sicd details aside from the overlay
    add_sicd_geometry_elements(sicd, kmz_document, folder,
                               inc_image_corners=inc_image_corners, inc_valid_data=inc_valid_data,
                               inc_scp=inc_scp, inc_collection_wedge=inc_collection_wedge)
    # create the ortho-rectification iterator
    if remap_function is None:
        remap_function = NRL()  # default amplitude remap
    calculator = FullResolutionFetcher(
        ortho_helper.reader, index=ortho_helper.index, dimension=1, block_size=block_size)
    ortho_iterator = OrthorectificationIterator(
        ortho_helper, calculator=calculator, remap_function=remap_function,
        recalc_remap_globals=True)
    # write the image overlay
    _write_sicd_overlay(ortho_iterator, kmz_document, folder)
def add_sicd_to_kmz(kmz_document, reader, index=0, pixel_limit=2048,
                    inc_image_corners=False, inc_valid_data=False,
                    inc_scp=False, inc_collection_wedge=False,
                    block_size=10, remap_function=None):
    """
    Adds elements for this SICD to the provided open kmz.
    Parameters
    ----------
    kmz_document : Document
        The kmz document, which must be open and have an associated archive.
    reader : SICDTypeReader
        The reader instance, must be of sicd type.
    index : int
        The index to use.
    pixel_limit : None|int
        The limit in pixel size to use for the constructed ground overlay.
    inc_image_corners : bool
        Include the image corners, if possible?
    inc_valid_data : bool
        Include the valid image area, if possible?
    inc_scp : bool
        Include the scp?
    inc_collection_wedge : bool
        Include the aperture location and collection wedge?
    block_size : None|int|float
        The block size for the iterator
    remap_function : None|RemapFunction
        The remap function to apply, or a suitable default will be chosen.
    Returns
    -------
    None
    """
    if not isinstance(reader, SICDTypeReader):
        raise TypeError('reader must be a instance of SICDTypeReader. Got type {}'.format(type(reader)))
    if pixel_limit is not None:
        pixel_limit = int(pixel_limit)
        if pixel_limit < 512:
            # enforce a sensible minimum overlay size
            pixel_limit = 512
    # create our projection helper
    index = int(index)
    sicd = reader.get_sicds_as_tuple()[index]
    try:
        # prefer the rational polynomial fit projection
        proj_helper = PGRatPolyProjection(sicd)
    except SarpyRatPolyError:
        # fall back to the plane projection when the fit fails
        proj_helper = PGProjection(sicd)
    # create our orthorectification helper
    ortho_helper = NearestNeighborMethod(reader, index=index, proj_helper=proj_helper)
    if pixel_limit is not None:
        # let's see what the ortho-rectified size will be
        ortho_size = ortho_helper.get_full_ortho_bounds()
        row_count = ortho_size[1] - ortho_size[0]
        col_count = ortho_size[3] - ortho_size[2]
        # reset the row/column spacing, if necessary
        if row_count > pixel_limit:
            proj_helper.row_spacing *= row_count/float(pixel_limit)
        if col_count > pixel_limit:
            proj_helper.col_spacing *= col_count/float(pixel_limit)
        if isinstance(proj_helper, PGRatPolyProjection):
            # the rational fit depends on the spacing, so redo it after changing it
            proj_helper.perform_rational_poly_fitting()
    # add the sicd details
    add_sicd_from_ortho_helper(
        kmz_document, ortho_helper,
        inc_image_corners=inc_image_corners, inc_valid_data=inc_valid_data, inc_scp=inc_scp,
        inc_collection_wedge=inc_collection_wedge, block_size=block_size, remap_function=remap_function)
def create_kmz_view(
        reader, output_directory, file_stem='view', pixel_limit=2048,
        inc_image_corners=False, inc_valid_data=False,
        inc_scp=True, inc_collection_wedge=False, block_size=10, remap_function=None):
    """
    Create a kmz view for the reader contents. **This will create one file per
    band/polarization present in the reader.**
    Parameters
    ----------
    reader : SICDTypeReader
    output_directory : str
    file_stem : str
    pixel_limit : None|int
    inc_image_corners : bool
        Include the image corners, if possible?
    inc_valid_data : bool
        Include the valid image area, if possible?
    inc_scp : bool
        Include the scp?
    inc_collection_wedge : bool
        Include the aperture location and collection wedge?
    block_size : None|int|float
        The block size for the iterator
    remap_function : None|RemapFunction
        The remap function to apply, or a suitable default will be chosen.
    Returns
    -------
    None
    Examples
    --------
    .. code-block:: python
        import logging
        logger = logging.getLogger('sarpy')
        logger.setLevel('INFO')
        import os
        from sarpy.io.complex.converter import open_complex
        from sarpy.visualization.kmz_product_creation import create_kmz_view
        test_root = '<root directory>'
        reader = open_complex(os.path.join(test_root, '<file name>'))
        create_kmz_view(reader, test_root,
                        file_stem='View-<something descriptive>',
                        pixel_limit=2048,
                        inc_collection_wedge=True)
    """
    def get_pol_abbreviation(pol_in):
        # reduce a 'V:H' style polarization string to 'VH' for the file name
        spol = pol_in.split(':')
        if len(spol) == 2:
            return spol[0][0] + spol[1][0]
        return pol_in
    def do_iteration():
        # NOTE: reads `the_band` and `the_pol` from the enclosing loop below
        kmz_file = os.path.join(output_directory, '{}_{}_{}.kmz'.format(file_stem,
                                                                        the_band,
                                                                        get_pol_abbreviation(the_pol)))
        logger.info('Writing kmz file for polarization {} and band {}'.format(the_pol, the_band))
        with prepare_kmz_file(kmz_file, name=reader.file_name) as kmz_doc:
            for the_partition, the_index, the_sicd in sicd_reader_iterator(
                    reader, partitions=partitions, polarization=the_pol, band=the_band):
                add_sicd_to_kmz(
                    kmz_doc, reader,
                    index=the_index, pixel_limit=pixel_limit,
                    inc_image_corners=inc_image_corners, inc_valid_data=inc_valid_data,
                    inc_scp=inc_scp, inc_collection_wedge=inc_collection_wedge,
                    block_size=block_size, remap_function=remap_function)
    # one kmz file per (band, polarization) pair present in the reader
    bands = set(reader.get_sicd_bands())
    pols = set(reader.get_sicd_polarizations())
    partitions = reader.get_sicd_partitions()
    for the_band in bands:
        for the_pol in pols:
            do_iteration()
| 24,512 | 33.283916 | 159 | py |
sarpy | sarpy-master/sarpy/visualization/cphd_kmz_product_creation.py | """
This module provides tools for creating kmz products for a CPHD type element.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Valkyrie Systems Corporation"
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
from sarpy.io.kml import Document
from sarpy.geometry.geocoords import ecf_to_geodetic
import sarpy.visualization.kmz_product_creation as kpc
WGS84_SEMIMAJOR = 6378137.0
WGS84_SEMIMINOR = 6356752.314245
logger = logging.getLogger(__name__)
def ray_ellipsoid_intersection(
    semiaxis_a, semiaxis_b, semiaxis_c, pos_x, pos_y, pos_z, dir_x, dir_y, dir_z
):
    """
    Compute the first intersection of a ray with an origin-centered ellipsoid.
    Parameters
    ----------
    semiaxis_a, semiaxis_b, semiaxis_c: float
        Semiaxes of the ellipsoid
    pos_x, pos_y, pos_z: float
        Ray origin
    dir_x, dir_y, dir_z: float
        Ray direction
    Returns
    -------
    intersection_x, intersection_y, intersection_z: float
        Point of intersection
    Raises
    ------
    ValueError
        If the ray does not intersect the ellipsoid, or points away from it.
    """
    # Based on https://stephenhartzell.medium.com/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6
    # The closed form below is the smaller root of the quadratic in `distance`, derived via sympy:
    # >>> pos_x, pos_y, pos_z = symbols('pos_x pos_y pos_z')
    # >>> dir_x, dir_y, dir_z = symbols('dir_x dir_y dir_z')
    # >>> semiaxis_a, semiaxis_b, semiaxis_c = symbols('semiaxis_a semiaxis_b semiaxis_c')
    # >>> distance = symbols('distance')
    # >>> solutions = solve((pos_x + distance*dir_x)**2/semiaxis_a**2 + (pos_y + distance*dir_y)**2/semiaxis_b**2 + (pos_z + distance*dir_z)**2/semiaxis_c**2 - 1, distance)
    # >>> print(solutions[0])
    # black formatted
    distance = (
        -dir_x * pos_x * semiaxis_b**2 * semiaxis_c**2
        - dir_y * pos_y * semiaxis_a**2 * semiaxis_c**2
        - dir_z * pos_z * semiaxis_a**2 * semiaxis_b**2
        - semiaxis_a
        * semiaxis_b
        * semiaxis_c
        * np.sqrt(
            -(dir_x**2) * pos_y**2 * semiaxis_c**2
            - dir_x**2 * pos_z**2 * semiaxis_b**2
            + dir_x**2 * semiaxis_b**2 * semiaxis_c**2
            + 2 * dir_x * dir_y * pos_x * pos_y * semiaxis_c**2
            + 2 * dir_x * dir_z * pos_x * pos_z * semiaxis_b**2
            - dir_y**2 * pos_x**2 * semiaxis_c**2
            - dir_y**2 * pos_z**2 * semiaxis_a**2
            + dir_y**2 * semiaxis_a**2 * semiaxis_c**2
            + 2 * dir_y * dir_z * pos_y * pos_z * semiaxis_a**2
            - dir_z**2 * pos_x**2 * semiaxis_b**2
            - dir_z**2 * pos_y**2 * semiaxis_a**2
            + dir_z**2 * semiaxis_a**2 * semiaxis_b**2
        )
    ) / (
        dir_x**2 * semiaxis_b**2 * semiaxis_c**2
        + dir_y**2 * semiaxis_a**2 * semiaxis_c**2
        + dir_z**2 * semiaxis_a**2 * semiaxis_b**2
    )
    # a negative discriminant produces nan, meaning there is no real intersection
    if np.isnan(distance):
        raise ValueError("Ray does not intersect ellipsoid")
    if distance < 0:
        raise ValueError("Ray points away from ellipsoid")
    return (
        pos_x + distance * dir_x,
        pos_y + distance * dir_y,
        pos_z + distance * dir_z,
    )
def ray_intersect_earth(position, direction):
    """Intersect an ECEF ray with the WGS-84 earth ellipsoid, returning the ECEF point."""
    px, py, pz = position[0], position[1], position[2]
    dx, dy, dz = direction[0], direction[1], direction[2]
    return ray_ellipsoid_intersection(
        WGS84_SEMIMAJOR,
        WGS84_SEMIMAJOR,
        WGS84_SEMIMINOR,
        px,
        py,
        pz,
        dx,
        dy,
        dz,
    )
def _create_cphd_styles(kmz_document):
    """
    Register the kml styles and style maps used by the cphd kmz rendering.
    Parameters
    ----------
    kmz_document : Document
    """
    def _setpolygon(name, *, bbggrr, low_aa, low_width, high_aa, high_width, outline="1"):
        # register a high/low detail style pair plus a style map for a polygon feature;
        # colors are kml aabbggrr hex strings
        opaque = "ff"
        kmz_document.add_style(
            name + "_high",
            line_style={"color": opaque + bbggrr, "width": high_width},
            poly_style={"color": high_aa + bbggrr, "outline": str(outline)},
        )
        kmz_document.add_style(
            name + "_low",
            line_style={"color": opaque + bbggrr, "width": low_width},
            poly_style={"color": low_aa + bbggrr, "outline": str(outline)},
        )
        kmz_document.add_style_map(name, name + "_high", name + "_low")
    # iarp - intended for basic point clamped to ground
    label = {"color": "ff50c0c0", "scale": "1.0"}
    icon = {
        "color": "ff5050c0",
        "scale": "1.5",
        "icon_ref": "http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png",
    }
    kmz_document.add_style("iarp_high", label_style=label, icon_style=icon)
    label["scale"] = "0.75"
    icon["scale"] = "1.0"
    kmz_document.add_style("iarp_low", label_style=label, icon_style=icon)
    kmz_document.add_style_map("iarp", "iarp_high", "iarp_low")
    # srp position style - intended for gx track
    line = {"color": "ff50ff50", "width": "1.5"}
    label = {"color": "ffc0c0c0", "scale": "0.5"}
    icon = {
        "scale": "2.0",
        "icon_ref": "http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png",
    }
    poly = {"color": "a050ff50"}
    kmz_document.add_style(
        "srp_high", line_style=line, label_style=label, icon_style=icon, poly_style=poly
    )
    line["width"] = "1.0"
    icon["scale"] = "1.0"
    poly = {"color": "7050ff50"}
    kmz_document.add_style(
        "srp_low", line_style=line, label_style=label, icon_style=icon, poly_style=poly
    )
    kmz_document.add_style_map("srp", "srp_high", "srp_low")
    # polygon styles for the various footprints and boresight surfaces
    _setpolygon(
        "mechanical_boresight",
        bbggrr="a00000",
        low_aa="70",
        low_width="2.0",
        high_aa="a0",
        high_width="3.5",
        outline="0",
    )
    _setpolygon(
        "electrical_boresight",
        bbggrr="a0a050",
        low_aa="70",
        low_width="2.0",
        high_aa="a0",
        high_width="3.5",
        outline="0",
    )
    _setpolygon(
        "globalimagearea",
        bbggrr="aa00ff",
        low_aa="00",
        low_width="1.0",
        high_aa="00",
        high_width="1.5",
    )
    _setpolygon(
        "channelimagearea",
        bbggrr="00aa7f",
        low_aa="70",
        low_width="1.0",
        high_aa="a0",
        high_width="1.5",
    )
    _setpolygon(
        "rcv_beam_footprint",
        bbggrr="ffaa55",
        low_aa="70",
        low_width="1.0",
        high_aa="a0",
        high_width="1.5",
    )
    _setpolygon(
        "tx_beam_footprint",
        bbggrr="0000ff",
        low_aa="70",
        low_width="1.0",
        high_aa="a0",
        high_width="1.5",
    )
def _imagearea_kml_coord(reader, imagearea_node):
    """Create KML coords of an imagearea footprint"""
    if imagearea_node.Polygon is None:
        # no explicit polygon; fall back to the bounding rectangle corners
        x1, y1 = imagearea_node.X1Y1.get_array()
        x2, y2 = imagearea_node.X2Y2.get_array()
        verts = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
    else:
        verts = [
            entry.get_array()
            for entry in sorted(imagearea_node.Polygon, key=lambda entry: entry.index)
        ]
    # close the ring
    verts.append(verts[0])
    return _xy_to_kml_coord(reader, verts)
def cphd_create_kmz_view(reader, output_directory, file_stem="view"):
    """
    Create a kmz view for the reader contents.
    Parameters
    ----------
    reader : CPHDTypeReader
    output_directory : str
    file_stem : str
    Returns
    -------
    None
    Examples
    --------
    .. code-block:: python
        import logging
        logger = logging.getLogger('sarpy')
        logger.setLevel('INFO')
        import os
        import sarpy.io.phase_history
        from sarpy.visualization.cphd_kmz_product_creation import cphd_create_kmz_view
        test_root = '<root directory>'
        reader = sarpy.io.phase_history.open(os.path.join(test_root, '<file name>'))
        cphd_create_kmz_view(reader, test_root,
                             file_stem='View-<something descriptive>')
    """
    def add_global(kmz_doc, root):
        # write collection-wide features: the IARP point and the global image area
        logger.info("Adding global to kmz.")
        folder = kmz_doc.add_container(
            par=root, the_type="Folder", name="Global", description="Global Metadata"
        )
        iarp_llh = reader.cphd_meta.SceneCoordinates.IARP.LLH.get_array()
        frm = "{1:0.8f},{0:0.8f},0"  # SICD version throws away height
        coords = frm.format(*iarp_llh)
        placemark = kmz_doc.add_container(
            par=folder, name="IARP", description="IARP", styleUrl="#iarp"
        )
        kmz_doc.add_point(coords, par=placemark, altitudeMode="absolute")
        ia_coords = _imagearea_kml_coord(
            reader, reader.cphd_meta.SceneCoordinates.ImageArea
        )
        placemark = kmz_doc.add_container(
            par=folder,
            name="ImageArea",
            description="ImageArea",
            styleUrl="#globalimagearea",
        )
        kmz_doc.add_polygon(
            " ".join(ia_coords),
            par=placemark,
            name="ImageArea",
            altitudeMode="absolute",
        )
    def add_channel(kmz_doc, root, channel_name):
        # write per-channel features: APC/SRP tracks, image area, antenna aiming
        channel_names = [chan.Identifier for chan in reader.cphd_meta.Data.Channels]
        channel_index = channel_names.index(channel_name)
        logger.info(f"Adding channel '{channel_name}' to kmz.")
        signal = reader.read_pvp_variable("SIGNAL", channel_name)
        if signal is None:
            # no SIGNAL PVP present; treat every vector as a signal vector
            num_vectors = reader.cphd_meta.Data.Channels[channel_index].NumVectors
            signal = np.ones(num_vectors)
        # subselect an evenly spaced subset of the signal vectors for display
        num_subselect = 24
        indices = np.where(signal == 1)[0]
        indices = indices[
            np.round(
                np.linspace(0, indices.size - 1, num_subselect, endpoint=True)
            ).astype(int)
        ]
        # use the midpoint of transmit and receive for time and aperture position
        times = (
            reader.read_pvp_variable("TxTime", channel_name)[indices]
            + reader.read_pvp_variable("RcvTime", channel_name)[indices]
        ) / 2.0
        arp_pos = (
            reader.read_pvp_variable("TxPos", channel_name)[indices]
            + reader.read_pvp_variable("RcvPos", channel_name)[indices]
        ) / 2.0
        srp_pos = reader.read_pvp_variable("SRPPos", channel_name)[indices]
        collection_start = reader.cphd_meta.Global.Timeline.CollectionStart.astype(
            "datetime64[us]"
        )
        whens = collection_start + (times * 1e6).astype("timedelta64[us]")
        whens = [str(time) + "Z" for time in whens]
        time_args = {"beginTime": whens[0], "endTime": whens[-1]}
        folder = kmz_doc.add_container(
            par=root,
            the_type="Folder",
            name=f"Channel {channel_name}",
            description=f"Channel {channel_name}",
            when=whens[0],
        )
        arp_coords = ecef_to_kml_coord(arp_pos)
        placemark = kmz_doc.add_container(
            par=folder,
            name=channel_name,
            description=f"aperture position for channel {channel_name}",
            styleUrl="#arp",
            **time_args,
        )
        kmz_doc.add_gx_track(
            arp_coords,
            whens,
            par=placemark,
            extrude=True,
            tesselate=True,
            altitudeMode="absolute",
        )
        srp_coords = ecef_to_kml_coord(srp_pos)
        placemark = kmz_doc.add_container(
            par=folder,
            name="SRP",
            description=f"stabilization reference point for channel {channel_name}",
            styleUrl="#srp",
            **time_args,
        )
        kmz_doc.add_gx_track(
            srp_coords,
            whens,
            par=placemark,
            extrude=True,
            tesselate=True,
            altitudeMode="absolute",
        )
        if reader.cphd_meta.Channel.Parameters[channel_index].ImageArea is not None:
            ia_coords = _imagearea_kml_coord(
                reader, reader.cphd_meta.Channel.Parameters[channel_index].ImageArea
            )
            placemark = kmz_doc.add_container(
                par=folder,
                name="ImageArea",
                description=f"ImageArea for channel {channel_name}",
                styleUrl="#channelimagearea",
            )
            kmz_doc.add_polygon(
                " ".join(ia_coords),
                par=placemark,
                name=f"ImageArea for channel {channel_name}",
                altitudeMode="absolute",
            )
        aiming = _antenna_aiming(reader, channel_index, signal)
        antenna_folder = kmz_doc.add_container(
            the_type="Folder",
            par=folder,
            name="Antenna",
            description=f"Antenna Aiming for channel {channel_name}",
        )
        boresight_folder = kmz_doc.add_container(
            the_type="Folder",
            par=antenna_folder,
            name="Boresights",
            description=f"Boresights for channel {channel_name}",
        )
        footprint_folder = kmz_doc.add_container(
            the_type="Folder",
            par=antenna_folder,
            name="3dB Footprints",
            description=f"Beam Footprints for channel {channel_name}",
        )
        for txrcv in ("Tx", "Rcv"):
            # beam footprints at start/middle/end of the collect
            for when in ('start', 'middle', 'end'):
                if when not in aiming[txrcv]['beam_footprint']:
                    continue  # footprint calculation must have failed
                this_footprint = aiming[txrcv]['beam_footprint'][when]
                name = f'{txrcv} beam footprint @ {when}'
                timestamp = str(collection_start + (this_footprint['time'] * 1e6).astype("timedelta64[us]")) + 'Z'
                placemark = kmz_doc.add_container(
                    par=footprint_folder,
                    name=name,
                    description=f"{name} for channel {channel_name}",
                    styleUrl=f'#{txrcv.lower()}_beam_footprint',
                    visibility=True,
                    when=timestamp,
                )
                coords = ecef_to_kml_coord(this_footprint['contour'])
                kmz_doc.add_polygon(
                    " ".join(coords), par=placemark,
                )
            for boresight_type in ("mechanical", "electrical"):
                visibility = txrcv == "Rcv"  # only display Rcv by default
                name = f"{txrcv} {boresight_type} boresight"
                # project each boresight direction onto the earth surface
                on_earth_ecf = np.asarray(
                    [
                        ray_intersect_earth(apc_pos, along)
                        for apc_pos, along in zip(
                            aiming[txrcv]["apc_position"][indices],
                            aiming[txrcv]["pointing"][boresight_type][indices],
                        )
                    ]
                )
                placemark = kmz_doc.add_container(
                    par=boresight_folder,
                    name=name,
                    description=f"{name} for channel {channel_name}<br><br>Highlighted edge indicates start time",
                    styleUrl=f"#{boresight_type}_boresight",
                    visibility=visibility,
                )
                boresight_coords = ecef_to_kml_coord(on_earth_ecf)
                # complex 3d polygons don't always render nicely. So, we'll manually triangulate it.
                mg = kmz_doc.add_multi_geometry(par=placemark)
                # Highlight the starting point
                kmz_doc.add_line_string(coords=' '.join([arp_coords[0], boresight_coords[0]]),
                                        par=mg,
                                        altitudeMode='absolute',)
                # two triangles per segment of the aperture/ground ribbon
                for idx in range(len(arp_coords)-1):
                    coords = [
                        arp_coords[idx],
                        boresight_coords[idx],
                        arp_coords[idx+1],
                        arp_coords[idx],
                    ]
                    kmz_doc.add_polygon(' '.join(coords), par=mg, altitudeMode='absolute')
                    coords = [
                        boresight_coords[idx],
                        boresight_coords[idx+1],
                        arp_coords[idx+1],
                        boresight_coords[idx],
                    ]
                    kmz_doc.add_polygon(' '.join(coords), par=mg, altitudeMode='absolute')
    kmz_file = os.path.join(output_directory, f"{file_stem}_cphd.kmz")
    with prepare_kmz_file(kmz_file, name=reader.file_name) as kmz_doc:
        root = kmz_doc.add_container(
            the_type="Folder", name=reader.cphd_meta.CollectionID.CoreName
        )
        add_global(kmz_doc, root)
        for chan in reader.cphd_meta.Data.Channels:
            add_channel(kmz_doc, root, channel_name=chan.Identifier)
def ecef_to_kml_coord(ecf_points):
    """Convert ECF points to kml 'lon,lat,alt' coordinate strings."""
    # TODO apply geoid. KML expects MSL
    llh_points = ecf_to_geodetic(ecf_points)
    return [f"{lon},{lat},{alt}" for lat, lon, alt in llh_points]
def prepare_kmz_file(file_name, **args):
    """
    Open a kmz Document with both the sicd and cphd styles registered.
    Parameters
    ----------
    file_name : str
    args
        Passed through to the Document constructor.
    Returns
    -------
    Document
    """
    kmz_document = Document(file_name=file_name, **args)
    # the cphd rendering reuses several of the sicd styles (e.g. '#arp')
    kpc._create_sicd_styles(kmz_document)
    _create_cphd_styles(kmz_document)
    return kmz_document
def _xy_to_kml_coord(reader, xy):
    """Convert a ReferenceSurface XY location to a kml coordinate"""
    xy = np.atleast_2d(xy)
    scene_coords = reader.cphd_meta.SceneCoordinates
    if scene_coords.ReferenceSurface.Planar is None:
        raise NotImplementedError
    # map planar (x, y) offsets into ECF using the IARP and the plane unit axes
    iarp_ecf = scene_coords.IARP.ECF.get_array()
    uiax = scene_coords.ReferenceSurface.Planar.uIAX.get_array()
    uiay = scene_coords.ReferenceSurface.Planar.uIAY.get_array()
    xy_ecf = iarp_ecf + xy[:, 0, np.newaxis] * uiax + xy[:, 1, np.newaxis] * uiay
    return ecef_to_kml_coord(xy_ecf)
def _antenna_aiming(reader, channel_index, signal_pvp):
    """
    Assemble antenna aiming information (APC positions, mechanical/electrical
    pointing, and 3dB beam footprints) for the indicated channel.
    Returns an empty dict when no Antenna metadata/parameters are present.
    """
    cphd_meta = reader.cphd_meta
    results = {}
    if cphd_meta.Antenna is None:
        return results
    # index the antenna metadata by identifier for lookup below
    apcs = {}
    for apc in cphd_meta.Antenna.AntPhaseCenter:
        apcs[apc.Identifier] = apc
    acfs = {}
    for acf in cphd_meta.Antenna.AntCoordFrame:
        acfs[acf.Identifier] = acf
    patterns = {}
    for antpat in cphd_meta.Antenna.AntPattern:
        patterns[antpat.Identifier] = antpat
    def _compute_pointing(channel_index, apc_id, antpat_id, txrcv):
        # mechanical pointing is the antenna frame +Z; electrical pointing
        # applies the electrical boresight direction cosines
        times = reader.read_pvp_variable(f"{txrcv}Time", channel_index)
        uacx = reader.read_pvp_variable(f"{txrcv}ACX", channel_index)
        uacy = reader.read_pvp_variable(f"{txrcv}ACY", channel_index)
        if uacx is None or uacy is None:
            # frame axes not in the PVPs - evaluate the coordinate frame polynomials
            acf_id = apcs[apc_id].ACFId
            uacx = acfs[acf_id].XAxisPoly(times)
            uacy = acfs[acf_id].YAxisPoly(times)
        uacz = np.cross(uacx, uacy)
        pointing = {}
        pointing['raw'] = {
            'times': times,
            'uacx': uacx,
            'uacy': uacy,
            'uacz': uacz,
        }
        pointing["mechanical"] = uacz
        ebpvp = reader.read_pvp_variable(f"{txrcv}EB", channel_index)
        if ebpvp is not None:
            eb_dcx = ebpvp[:, 0]
            eb_dcy = ebpvp[:, 1]
        else:
            # electrical boresight not in the PVPs - evaluate the EB polynomials
            eb_dcx = patterns[antpat_id].EB.DCXPoly(times)
            eb_dcy = patterns[antpat_id].EB.DCYPoly(times)
        pointing['raw']['eb_dcx'] = eb_dcx
        pointing['raw']['eb_dcy'] = eb_dcy
        pointing["electrical"] = _acf_to_ecef(eb_dcx, eb_dcy, uacx, uacy)
        return pointing
    def _beam_footprint(antpat_id, apc_pos, time, uacx, uacy, eb_dcx, eb_dcy):
        """Compute a beam contour on the earth"""
        array_gain_poly = patterns[antpat_id].Array.GainPoly
        element_gain_poly = patterns[antpat_id].Element.GainPoly
        # quadratic approximation of the combined gain near boresight
        approx_gain_coefs = np.zeros((3, 3))
        approx_gain_coefs += np.pad(array_gain_poly.Coefs, [(0, 3), (0, 3)])[:3, :3]
        approx_gain_coefs += np.pad(element_gain_poly.Coefs, [(0, 3), (0, 3)])[:3, :3]
        Ns = 201
        db_down = 10  # dB down from peak
        # direction cosine extents at which the quadratic gain drops db_down
        deltaDC_Xmax = np.abs((-approx_gain_coefs[1, 0] +
                               np.sqrt(approx_gain_coefs[1, 0]**2-4*approx_gain_coefs[2, 0]*db_down)) /
                              (2*approx_gain_coefs[2, 0]))
        deltaDC_Ymax = np.abs((-approx_gain_coefs[0, 1] +
                               np.sqrt(approx_gain_coefs[0, 1]**2-4*approx_gain_coefs[0, 2]*db_down)) /
                              (2*approx_gain_coefs[0, 2]))
        X = np.linspace(-deltaDC_Xmax, deltaDC_Xmax, Ns)
        Y = np.linspace(-deltaDC_Ymax, deltaDC_Ymax, Ns)
        XXc, YYc = np.meshgrid(X, Y, indexing='ij')
        array_gain_pattern = array_gain_poly(XXc, YYc)
        element_gain_pattern = element_gain_poly(XXc + eb_dcx, YYc + eb_dcy)
        gain_pattern = array_gain_pattern + element_gain_pattern
        contour_levels = [-3]  # dB
        # use matplotlib's contour machinery to extract the -3dB contour
        # NOTE(review): ContourSet.collections is deprecated in newer matplotlib
        # releases - confirm the installed version still supports this access
        contour_sets = plt.contour(XXc, YYc, gain_pattern, levels=contour_levels)
        plt.close()  # close the figure created by contour
        contour_vertices = contour_sets.collections[0].get_paths()[0].vertices
        delta_dcx = contour_vertices[:, 0]
        delta_dcy = contour_vertices[:, 1]
        contour_pointing = _acf_to_ecef(delta_dcx + eb_dcx,
                                        delta_dcy + eb_dcy,
                                        uacx,
                                        uacy,)
        # project each contour direction onto the earth surface
        contour_earth_ecf = []
        for along in contour_pointing:
            contour_earth_ecf.append(ray_intersect_earth(apc_pos, along))
        return {
            'time': time,
            'contour': contour_earth_ecf
        }
    chan_params = cphd_meta.Channel.Parameters[channel_index]
    if not chan_params.Antenna:
        return {}
    result = {}
    tx_apc_id = chan_params.Antenna.TxAPCId
    result["Tx"] = {
        "APCId": tx_apc_id,
        "apc_position": reader.read_pvp_variable("TxPos", channel_index),
        "pointing": _compute_pointing(
            channel_index, tx_apc_id, chan_params.Antenna.TxAPATId, "Tx"
        ),
    }
    rcv_apc_id = chan_params.Antenna.RcvAPCId
    result["Rcv"] = {
        "APCId": rcv_apc_id,
        "apc_position": reader.read_pvp_variable("RcvPos", channel_index),
        "pointing": _compute_pointing(
            channel_index, rcv_apc_id, chan_params.Antenna.RcvAPATId, "Rcv"
        ),
    }
    # beam footprints at the start/middle/end signal vectors; failures are
    # logged and the corresponding entry is simply omitted
    indices = np.where(signal_pvp == 1)[0]
    result['Tx']['beam_footprint'] = {}
    result['Rcv']['beam_footprint'] = {}
    for name, pvp_index in [('start', indices[0]),
                            ('middle', indices[len(indices)//2]),
                            ('end', indices[-1])]:
        try:
            result['Tx']['beam_footprint'][name] = _beam_footprint(antpat_id=chan_params.Antenna.TxAPATId,
                                                                   apc_pos=reader.read_pvp_variable("TxPos", channel_index)[pvp_index],
                                                                   time=reader.read_pvp_variable("TxTime", channel_index)[pvp_index],
                                                                   uacx=result['Tx']['pointing']['raw']['uacx'][pvp_index],
                                                                   uacy=result['Tx']['pointing']['raw']['uacy'][pvp_index],
                                                                   eb_dcx=result['Tx']['pointing']['raw']['eb_dcx'][pvp_index],
                                                                   eb_dcy=result['Tx']['pointing']['raw']['eb_dcy'][pvp_index],
                                                                   )
        except Exception as exc:
            logger.warning(f"Exception while calculating Tx beam footprint of {chan_params.Antenna.TxAPATId}")
            logger.warning(exc)
        try:
            result['Rcv']['beam_footprint'][name] = _beam_footprint(antpat_id=chan_params.Antenna.RcvAPATId,
                                                                    apc_pos=reader.read_pvp_variable("RcvPos", channel_index)[pvp_index],
                                                                    time=reader.read_pvp_variable("RcvTime", channel_index)[pvp_index],
                                                                    uacx=result['Rcv']['pointing']['raw']['uacx'][pvp_index],
                                                                    uacy=result['Rcv']['pointing']['raw']['uacy'][pvp_index],
                                                                    eb_dcx=result['Rcv']['pointing']['raw']['eb_dcx'][pvp_index],
                                                                    eb_dcy=result['Rcv']['pointing']['raw']['eb_dcy'][pvp_index],
                                                                    )
        except Exception as exc:
            logger.warning(f"Exception while calculating Rcv beam footprint of {chan_params.Antenna.RcvAPATId}")
            logger.warning(str(exc))
    return result
def _acf_to_ecef(eb_dcx, eb_dcy, uacx, uacy):
uacz = np.cross(uacx, uacy)
eb_dcz = np.sqrt(1 - eb_dcx**2 - eb_dcy**2)
eb = np.stack((eb_dcx, eb_dcy, eb_dcz)).T
eb_pointing = eb[:, 0, np.newaxis] * uacx
eb_pointing += eb[:, 1, np.newaxis] * uacy
eb_pointing += eb[:, 2, np.newaxis] * uacz
return eb_pointing
| 24,545 | 34.625544 | 172 | py |
sarpy | sarpy-master/sarpy/visualization/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/visualization/remap.py | """
Provides common methods for remapping a complex or other array to 8 or 16-bit
image type arrays.
Note: The original function and 8-bit implementation has been replaced with a
class based solution which allows state variables associated with the remap
function, and support for 16-bit versions, as well as an 8-bit MA, RGB or RGBA
lookup tables.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Wade Schwartzkopf", "Thomas McCullough")
import logging
from collections import OrderedDict
from typing import Dict, Union, Tuple, Type, List, Optional
import warnings
import numpy
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.utils import get_data_mean_magnitude, stats_calculation, \
get_data_extrema
try:
from matplotlib import cm
except ImportError:
cm = None
logger = logging.getLogger(__name__)

# module-level registry state: _REMAP_DICT maps remap name to remap instance,
# and the flag presumably tracks whether the defaults have been registered yet
# (both are populated elsewhere in this module)
_DEFAULTS_REGISTERED = False
_REMAP_DICT = OrderedDict()  # type: Dict[str, RemapFunction]
###########
# helper functions
def clip_cast(
        array: numpy.ndarray,
        dtype: Union[str, numpy.dtype, numpy.number] = 'uint8',
        min_value: Union[None, int, float] = None,
        max_value: Union[None, int, float] = None) -> numpy.ndarray:
    """
    Cast by clipping values outside of valid range, rather than truncating.

    Parameters
    ----------
    array : numpy.ndarray
    dtype : str|numpy.dtype
        The (integer) type to which the data will be cast.
    min_value : None|int|float
        Optional lower clip bound, never looser than the type's representable minimum.
    max_value : None|int|float
        Optional upper clip bound, never looser than the type's representable maximum.

    Returns
    -------
    numpy.ndarray
    """

    out_dtype = numpy.dtype(dtype)
    info = numpy.iinfo(out_dtype)
    # tighten any provided bounds to what the target type can actually represent
    lower = info.min if min_value is None else max(min_value, info.min)
    upper = info.max if max_value is None else min(max_value, info.max)
    return numpy.clip(array, lower, upper).astype(out_dtype)
def amplitude_to_density(
        data: numpy.ndarray,
        dmin: Union[int, float] = 30,
        mmult: Union[int, float] = 40,
        data_mean: Union[None, int, float] = None,
        eps: float = 1e-5) -> numpy.ndarray:
    """
    Convert to density data for remap.

    This is a digested version of contents presented in a 1994 publication
    entitled "Softcopy Display of SAR Data" by Kevin Mangis. It is unclear where
    this was first published or where it may be publicly available.

    Parameters
    ----------
    data : numpy.ndarray
        The (presumably complex) data to remap
    dmin : float|int
        A dynamic range parameter. Lower this widens the range, will raising it
        narrows the range. This was historically fixed at 30.
    mmult : float|int
        A contrast parameter. Low values will result is higher contrast and quicker
        saturation, while high values will decrease contrast and slower saturation.
        There is some balance between the competing effects in the `dmin` and `mmult`
        parameters.
    data_mean : None|float|int
        The data mean (for this or the parent array for continuity), which will
        be calculated if not provided.
    eps : float
        Small offset creating a nominal floor when mapping data containing 0's.
        This was previously a hard-coded constant, and the default value
        preserves the historical behavior.

    Returns
    -------
    numpy.ndarray
    """

    dmin = float(dmin)
    if not (0 <= dmin < 255):
        raise ValueError('Invalid dmin value {}'.format(dmin))
    mmult = float(mmult)
    if mmult < 1:
        raise ValueError('Invalid mmult value {}'.format(mmult))
    eps = float(eps)

    amplitude = numpy.abs(data)
    if numpy.all(amplitude == 0):
        return amplitude
    else:
        if not data_mean:
            # NB: this intentionally treats a mean of 0 the same as "not provided"
            data_mean = numpy.mean(amplitude[numpy.isfinite(amplitude)])
        # remap parameters
        C_L = 0.8*data_mean
        C_H = mmult*C_L  # decreasing mmult will result in higher contrast (and quicker saturation)
        slope = (255 - dmin)/numpy.log10(C_H/C_L)
        constant = dmin - (slope*numpy.log10(C_L))
        # NB: C_H/C_L trivially collapses to mmult, but this is maintained for
        # clarity in historical reference
        # Originally, C_L and C_H were static values drawn from a determined set
        # of remap look-up tables. The C_L/C_H values were presumably based roughly
        # on mean amplitude and desired remap brightness/contrast. The dmin value
        # was fixed as 30.
        return slope*numpy.log10(numpy.maximum(amplitude, eps)) + constant
def _linear_map(
data: numpy.ndarray,
min_value: float,
max_value: float) -> numpy.ndarray:
"""
Helper function which maps the input data, assumed to be of the correct from,
into [0, 1] via a linear mapping (data - min_value)(max_value - min_value)
and then clipping.
Parameters
----------
data : numpy.ndarray
min_value : float
max_value : float
Returns
-------
numpy.ndarray
"""
return numpy.clip((data - min_value)/float(max_value - min_value), 0, 1)
def _nrl_stats(
        amplitude: numpy.ndarray,
        percentile: Union[int, float] = 99) -> Tuple[float, float, float]:
    """
    Calculate the statistics for input into the nrl remap.

    Parameters
    ----------
    amplitude : numpy.ndarray
        The amplitude array, assumed real valued.
    percentile : float|int
        Which percentile to calculate

    Returns
    -------
    minimum: float
        Observed minimum
    maximum: float
        Observed maximum
    percentile: float
        Observed value with desired percentile
    """

    finite_values = amplitude[numpy.isfinite(amplitude)]
    if finite_values.size == 0:
        # degenerate case - no finite entries at all
        return 0., 0., 0.
    return stats_calculation(finite_values, percentile=percentile)
############
# remap callable classes
class RemapFunction(object):
    """
    Abstract remap class which is callable.

    See the :func:`call` implementation for the given class to understand
    what specific keyword arguments are allowed for the specific instance.
    """

    _name = '_RemapFunction'
    __slots__ = ('_override_name', '_bit_depth', '_dimension')
    # acceptable values for the final output dimension; subclasses may narrow
    # this (e.g. monochromatic remaps permit only 0)
    _allowed_dimension = {0, 1, 2, 3, 4}

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            dimension: int = 0):
        """
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
            Should be one of 8, 16, or 32
        dimension : int
        """

        # all validation is delegated to the private setters below
        self._override_name = None
        self._bit_depth = None
        self._dimension = None
        self._set_name(override_name)
        self._set_bit_depth(bit_depth)
        self._set_dimension(dimension)

    @property
    def name(self) -> str:
        """
        str: The (read-only) name for the remap function. This will be the
        override_name if one has been provided for this instance, otherwise it
        will be the generic `_name` class value.
        """

        return self._name if self._override_name is None else self._override_name

    def _set_name(self, value: Optional[str]):
        # only a string (or None, meaning "use the class default") is permitted
        if value is None or isinstance(value, str):
            self._override_name = value
        else:
            raise ValueError('Got incompatible name')

    @property
    def bit_depth(self) -> int:
        """
        int: The (read-only) bit depth, which should be one of 8, 16, or 32.
        This is expected to be enforced by the implementation directly.
        """

        return self._bit_depth

    def _set_bit_depth(self, value: int):
        """
        This is intended to be read-only.

        Parameters
        ----------
        value : int
        """

        value = int(value)
        if value not in [8, 16, 32]:
            raise ValueError(
                'Bit depth is required to be one of 8, 16, or 32 and we got `{}`'.format(value))
        self._bit_depth = value

    @property
    def dimension(self) -> int:
        """
        int: The (read-only) size of the (additional) output final dimension.
        The value 0 is monochromatic, where the returned output will have identical
        shape as input. Any other value should have additional final dimension of this size.
        """

        return self._dimension

    def _set_dimension(self, value: int):
        """
        The property is intended to be read-only.

        Parameters
        ----------
        value : int
        """

        value = int(value)
        # subclasses may set _allowed_dimension to None to skip this check
        if self._allowed_dimension is not None and value not in self._allowed_dimension:
            raise ValueError(
                'Dimension is required to be one of `{}`, got `{}`'.format(self._allowed_dimension, value))
        self._dimension = value

    @property
    def output_dtype(self) -> numpy.dtype:
        """
        numpy.dtype: The output data type.
        """

        # unsigned integer type matching the configured bit depth
        if self._bit_depth == 8:
            return numpy.dtype('u1')
        elif self._bit_depth == 16:
            return numpy.dtype('u2')
        elif self._bit_depth == 32:
            return numpy.dtype('u4')
        else:
            raise ValueError('Unhandled bit_depth `{}`'.format(self._bit_depth))

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function
        set? This should return `True` if there are no global parameters.
        """

        return True

    def raw_call(
            self,
            data: numpy.ndarray,
            **kwargs) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        kwargs
            Some keyword arguments may be allowed here

        Returns
        -------
        numpy.ndarray
            This should generally have `float64` dtype.
        """

        raise NotImplementedError

    def call(
            self,
            data: numpy.ndarray,
            **kwargs) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method os directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable, as follows:

        >>> remap = RemapFunction()
        >>> discrete_data = remap(data, **kwargs)

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        kwargs
            Some keyword arguments may be allowed here

        Returns
        -------
        numpy.ndarray
        """

        # discretize the floating point result by clipping into the output type
        return clip_cast(self.raw_call(data, **kwargs), dtype=self.output_dtype)

    def __call__(
            self,
            data: numpy.ndarray,
            **kwargs) -> numpy.ndarray:
        return self.call(data, **kwargs)

    @staticmethod
    def _validate_pixel_bounds(
            reader: SICDTypeReader,
            index: int,
            pixel_bounds: Union[None, Tuple, List, numpy.ndarray]):
        # validates `(row min, row max, col min, col max)` against the data size;
        # NOTE(review): negative entries are accepted here - presumably to be
        # interpreted downstream like negative indexing; verify at call sites
        data_size = reader.get_data_size_as_tuple()[index]
        if pixel_bounds is None:
            return 0, data_size[0], 0, data_size[1]

        if not (
                (-data_size[0] <= pixel_bounds[0] <= data_size[0]) and
                (-data_size[0] <= pixel_bounds[1] <= data_size[0]) and
                (-data_size[1] <= pixel_bounds[2] <= data_size[1]) and
                (-data_size[1] <= pixel_bounds[3] <= data_size[1])):
            raise ValueError('invalid pixel bounds `{}` for data of shape `{}`'.format(pixel_bounds, data_size))
        return pixel_bounds

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, Tuple, List, numpy.ndarray] = None):
        """
        Calculates any useful global bounds for the specified reader, the given
        index, and inside the given pixel bounds.

        This is expected to save any necessary state here.

        Parameters
        ----------
        reader : SICDTypeReader
        index : int
        pixel_bounds : None|tuple|list|numpy.ndarray
            If provided, is of the form `(row min, row max, column min, column max)`.

        Returns
        -------
        None
        """

        raise NotImplementedError
class MonochromaticRemap(RemapFunction):
    """
    Abstract monochromatic remap class.

    This fixes the output dimension at 0 (single channel), and adds a
    configurable maximum output value bounded by the range of the output type.
    """

    _name = '_Monochromatic'
    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_max_output_value')
    _allowed_dimension = {0, }

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None):
        r"""
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
            The maximum output value. If provided, this must be in the interval
            :math:`[0, 2^{bit\_depth}]`
        """

        self._max_output_value = None
        super(MonochromaticRemap, self).__init__(
            override_name=override_name, bit_depth=bit_depth, dimension=0)
        self._set_max_output_value(max_output_value)

    @property
    def max_output_value(self) -> int:
        """
        int: The (read-only) maximum output value size.
        """

        return self._max_output_value

    def _set_max_output_value(self, value: Optional[int]):
        # bound the provided value by the representable range of the output type
        largest = numpy.iinfo(self.output_dtype).max
        value = largest if value is None else int(value)
        if not (0 < value <= largest):
            raise ValueError(
                'the max_output_value must be between 0 and {}, '
                'got {}'.format(largest, value))
        self._max_output_value = value

    def raw_call(self, data, **kwargs):
        raise NotImplementedError

    def calculate_global_parameters_from_reader(self, reader, index=0, pixel_bounds=None):
        raise NotImplementedError
############
# basic monchromatic collection
class Density(MonochromaticRemap):
    """
    A monochromatic logarithmic density remapping function.

    This is a digested version of contents presented in a 1994 publication
    entitled "Softcopy Display of SAR Data" by Kevin Mangis. It is unclear where
    this was first published or where it may be publicly available.
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_dmin', '_mmult', '_eps', '_data_mean')
    _name = 'density'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            dmin: Union[float, int] = 30,
            mmult: Union[float, int] = 40,
            eps: float = 1e-5,
            data_mean: Union[None, int, float] = None):
        r"""
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
            The maximum output value. If provided, this must be in the interval
            :math:`[0, 2^{bit\_depth}]`
        dmin : float|int
            A dynamic range parameter. Lower this widens the range, will raising it
            narrows the range. This was historically fixed at 30.
        mmult : float|int
            A contrast parameter. Low values will result is higher contrast and quicker
            saturation, while high values will decrease contrast and slower saturation.
            There is some balance between the competing effects in the `dmin` and `mmult`
            parameters.
        eps : float
            small offset to create a nominal floor when mapping data containing 0's.
        data_mean : None|float|int
            The global data mean (for continuity). The appropriate value will be
            calculated on a per calling array basis if not provided.
        """

        MonochromaticRemap.__init__(self, override_name=override_name, bit_depth=bit_depth, max_output_value=max_output_value)
        self._data_mean = None
        self._dmin = None
        self._mmult = None
        # NOTE(review): eps is stored here, but raw_call does not currently
        # forward it to amplitude_to_density - confirm whether this is intended
        self._eps = float(eps)
        self._set_dmin(dmin)
        self._set_mmult(mmult)
        self.data_mean = data_mean

    @property
    def dmin(self) -> float:
        """
        float: The dynamic range parameter. This is read-only.
        """

        return self._dmin

    def _set_dmin(self, value: Union[int, float]):
        value = float(value)
        if not (0 <= value < 255):
            raise ValueError('dmin must be in the interval [0, 255), got value {}'.format(value))
        self._dmin = value

    @property
    def mmult(self) -> float:
        """
        float: The contrast parameter. This is read only.
        """

        return self._mmult

    def _set_mmult(self, value: Union[int, float]):
        value = float(value)
        if value < 1:
            # corrected message - the requirement is mmult >= 1, the previous
            # text stated the condition backwards
            raise ValueError('mmult must be >= 1, got {}'.format(value))
        self._mmult = value

    @property
    def data_mean(self) -> Optional[float]:
        """
        None|float: The data mean for global use.
        """

        return self._data_mean

    @data_mean.setter
    def data_mean(self, value: Optional[float]):
        if value is None:
            self._data_mean = None
            return

        self._data_mean = float(value)

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Is the global parameters used for applying this remap function
        set? In this case, this is the `data_mean` property.
        """

        return self._data_mean is not None

    def raw_call(
            self,
            data: numpy.ndarray,
            data_mean: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        data_mean : None|float
            The pre-calculated data mean, for consistent global use. The order
            of preference is the value provided here, the class data_mean property
            value, then the value calculated from the present sample.

        Returns
        -------
        numpy.ndarray
        """

        data_mean = float(data_mean) if data_mean is not None else self._data_mean
        # the amplitude_to_density function is specifically geared towards
        # dynamic range of 0 - 255, just adjust it.
        multiplier = float(self.max_output_value)/255.0
        return multiplier*amplitude_to_density(data, dmin=self.dmin, mmult=self.mmult, data_mean=data_mean)

    def call(
            self,
            data: numpy.ndarray,
            data_mean: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method os directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable, as follows:

        >>> remap = Density()
        >>> discrete_data = remap(data, data_mean=85.2)

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        data_mean : None|float
            The pre-calculated data mean, for consistent global use. The order
            of preference is the value provided here, the class data_mean property
            value, then the value calculated from the present sample.

        Returns
        -------
        numpy.ndarray
        """

        return clip_cast(
            self.raw_call(data, data_mean=data_mean),
            dtype=self.output_dtype, min_value=0, max_value=self.max_output_value)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None):
        pixel_bounds = self._validate_pixel_bounds(reader, index, pixel_bounds)
        self.data_mean = get_data_mean_magnitude(pixel_bounds, reader, index, 25*1024*1024)
class Brighter(Density):
    """
    The density remap using parameters for brighter results.
    """

    _name = 'brighter'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            eps: float = 1e-5,
            data_mean: Union[None, int, float] = None):
        """
        A :class:`Density` remap with the dynamic range floor raised
        (`dmin=60`, `mmult=40`), yielding a brighter rendering.

        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
        eps : float
        data_mean : None|int|float
        """

        super(Brighter, self).__init__(
            override_name=override_name, bit_depth=bit_depth,
            max_output_value=max_output_value, dmin=60, mmult=40,
            eps=eps, data_mean=data_mean)
class Darker(Density):
    """
    The density remap using parameters for darker results.
    """

    _name = 'darker'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            eps: float = 1e-5,
            data_mean: Union[None, int, float] = None):
        """
        A :class:`Density` remap with the dynamic range floor lowered
        (`dmin=0`, `mmult=40`), yielding a darker rendering.

        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
        eps : float
        data_mean : None|int|float
        """

        super(Darker, self).__init__(
            override_name=override_name, bit_depth=bit_depth,
            max_output_value=max_output_value, dmin=0, mmult=40,
            eps=eps, data_mean=data_mean)
class High_Contrast(Density):
    """
    The density remap using parameters for high contrast results.
    """

    _name = 'high_contrast'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            eps: float = 1e-5,
            data_mean: Union[None, int, float] = None):
        """
        A :class:`Density` remap with the contrast parameter lowered
        (`dmin=30`, `mmult=4`), yielding higher contrast and quicker saturation.

        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
        eps : float
        data_mean : None|int|float
        """

        super(High_Contrast, self).__init__(
            override_name=override_name, bit_depth=bit_depth,
            max_output_value=max_output_value, dmin=30, mmult=4,
            eps=eps, data_mean=data_mean)
class Linear(MonochromaticRemap):
    """
    A monochromatic linear remap function.
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_max_value', '_min_value')
    _name = 'linear'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            min_value: Union[None, int, float] = None,
            max_value: Union[None, int, float] = None):
        """
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
            The maximum output value, bounded by the range of the output type.
        min_value : None|float
        max_value : None|float
        """

        MonochromaticRemap.__init__(
            self,
            override_name=override_name,
            bit_depth=bit_depth,
            max_output_value=max_output_value)
        # NOTE(review): these assignments bypass the property setters, so no
        # finiteness check is applied to constructor-provided extrema
        if min_value is not None:
            min_value = float(min_value)
        if max_value is not None:
            max_value = float(max_value)

        self._min_value = min_value
        self._max_value = max_value

    @property
    def min_value(self) -> Optional[float]:
        """
        None|float: The minimum value allowed (clipped below this)
        """

        return self._min_value

    @min_value.setter
    def min_value(self, value: Optional[float]):
        if value is None:
            self._min_value = None
        else:
            value = float(value)
            if not numpy.isfinite(value):
                raise ValueError('Got unsupported minimum value `{}`'.format(value))
            self._min_value = value

    @property
    def max_value(self) -> Optional[float]:
        """
        None|float: The maximum value allowed (clipped above this)
        """

        return self._max_value

    @max_value.setter
    def max_value(self, value: Optional[float]):
        if value is None:
            self._max_value = None
        else:
            value = float(value)
            if not numpy.isfinite(value):
                raise ValueError('Got unsupported maximum value `{}`'.format(value))
            self._max_value = value

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function
        set? In this case, this is the `min_value` and `max_value` properties.
        """

        return self._min_value is not None and self._max_value is not None

    def _get_extrema(
            self,
            amplitude: numpy.ndarray,
            min_value: Optional[float],
            max_value: Optional[float]) -> Tuple[float, float]:
        # resolve the extrema with precedence: explicit argument, then the
        # instance property, then the observed extremum of the sample
        if min_value is not None:
            min_value = float(min_value)
        if max_value is not None:
            max_value = float(max_value)

        if min_value is None:
            min_value = self.min_value
        if min_value is None:
            min_value = numpy.min(amplitude)

        if max_value is None:
            max_value = self.max_value
        if max_value is None:
            max_value = numpy.max(amplitude)
        # sanity check - swap if provided out of order
        if min_value > max_value:
            min_value, max_value = max_value, min_value
        return min_value, max_value

    def raw_call(
            self,
            data: numpy.ndarray,
            min_value: Optional[float] = None,
            max_value: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        min_value : None|float
            A minimum threshold, or pre-calculated data minimum, for consistent
            global use. The order of preference is the value provided here, the
            class `min_value` property value, then calculated from the present
            sample.
        max_value : None|float
            A maximum value threshold, or pre-calculated data maximum, for consistent
            global use. The order of preference is the value provided here, the
            class `max_value` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        if numpy.iscomplexobj(data):
            amplitude = numpy.abs(data)
        else:
            amplitude = data

        out = numpy.empty(amplitude.shape, dtype='float64')
        max_output_value = self.max_output_value
        finite_mask = numpy.isfinite(amplitude)
        # non-finite entries saturate to the maximum output value
        out[~finite_mask] = max_output_value
        if numpy.any(finite_mask):
            temp_data = amplitude[finite_mask]
            min_value, max_value = self._get_extrema(temp_data, min_value, max_value)
            # degenerate (constant) data maps to 0
            if min_value == max_value:
                out[finite_mask] = 0
            else:
                out[finite_mask] = max_output_value*_linear_map(amplitude[finite_mask], min_value, max_value)
        return out

    def call(
            self,
            data: numpy.ndarray,
            min_value: Optional[float] = None,
            max_value: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method os directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable, as follows:

        >>> remap = Linear()
        >>> discrete_data = remap(data, min_value=0, max_value=100)

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        min_value : None|float
            A minimum threshold, or pre-calculated data minimum, for consistent
            global use. The order of preference is the value provided here, the
            class `min_value` property value, then calculated from the present
            sample.
        max_value : None|float
            A maximum value threshold, or pre-calculated data maximum, for consistent
            global use. The order of preference is the value provided here, the
            class `max_value` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        return clip_cast(
            self.raw_call(data, min_value=min_value, max_value=max_value),
            dtype=self.output_dtype, min_value=0, max_value=self.max_output_value)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None) -> None:
        pixel_bounds = self._validate_pixel_bounds(reader, index, pixel_bounds)
        self.min_value, self.max_value = get_data_extrema(
            pixel_bounds, reader, index, 25*1024*1024, percentile=None)
class Logarithmic(MonochromaticRemap):
    """
    A logarithmic remap function.
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_max_value', '_min_value')
    _name = 'log'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            min_value: Optional[float] = None,
            max_value: Optional[float] = None):
        """
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
            The maximum output value, bounded by the range of the output type.
        min_value : None|float
        max_value : None|float
        """

        MonochromaticRemap.__init__(self, override_name=override_name, bit_depth=bit_depth, max_output_value=max_output_value)
        # NOTE(review): these assignments bypass the property setters, so no
        # finiteness check is applied to constructor-provided extrema
        if min_value is not None:
            min_value = float(min_value)
        if max_value is not None:
            max_value = float(max_value)

        self._min_value = min_value
        self._max_value = max_value

    @property
    def min_value(self) -> Optional[float]:
        """
        None|float: The minimum value allowed (clipped below this)
        """

        return self._min_value

    @min_value.setter
    def min_value(self, value: Optional[float]):
        if value is None:
            self._min_value = None
        else:
            value = float(value)
            if not numpy.isfinite(value):
                raise ValueError('Got unsupported minimum value `{}`'.format(value))
            self._min_value = value

    @property
    def max_value(self) -> Optional[float]:
        """
        None|float: The maximum value allowed (clipped above this)
        """

        return self._max_value

    @max_value.setter
    def max_value(self, value: Optional[float]):
        if value is None:
            self._max_value = None
        else:
            value = float(value)
            if not numpy.isfinite(value):
                raise ValueError('Got unsupported maximum value `{}`'.format(value))
            self._max_value = value

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function
        set? In this case, this is the `min_value` and `max_value` properties.
        """

        return self._min_value is not None and self._max_value is not None

    def _get_extrema(
            self,
            amplitude: numpy.ndarray,
            min_value: Optional[float],
            max_value: Optional[float]) -> Tuple[float, float]:
        # resolve the extrema with precedence: explicit argument, then the
        # instance property, then the observed extremum of the sample
        if min_value is not None:
            min_value = float(min_value)
        if max_value is not None:
            max_value = float(max_value)

        if min_value is None:
            min_value = self.min_value
        if min_value is None:
            min_value = numpy.min(amplitude)

        if max_value is None:
            max_value = self.max_value
        if max_value is None:
            max_value = numpy.max(amplitude)
        # sanity check - swap if provided out of order
        if min_value > max_value:
            min_value, max_value = max_value, min_value
        return min_value, max_value

    def raw_call(
            self,
            data: numpy.ndarray,
            min_value: Optional[float] = None,
            max_value: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        min_value : None|float
            A minimum threshold, or pre-calculated data minimum, for consistent
            global use. The order of preference is the value provided here, the
            class `min_value` property value, then calculated from the present
            sample.
        max_value : None|float
            A maximum value threshold, or pre-calculated data maximum, for consistent
            global use. The order of preference is the value provided here, the
            class `max_value` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        amplitude = numpy.abs(data)
        out = numpy.empty(amplitude.shape, dtype='float64')
        max_output_value = self.max_output_value
        finite_mask = numpy.isfinite(amplitude)
        zero_mask = (amplitude == 0)
        use_mask = finite_mask & (~zero_mask)
        # non-finite entries saturate to the maximum output value, zeros map to 0
        out[~finite_mask] = max_output_value
        out[zero_mask] = 0

        if numpy.any(use_mask):
            temp_data = amplitude[use_mask]
            min_value, max_value = self._get_extrema(temp_data, min_value, max_value)
            # degenerate (constant) data maps to 0
            if min_value == max_value:
                out[use_mask] = 0
            else:
                # clipped data is normalized into [1, 2], so log2 maps it
                # into [0, 1] before scaling by the maximum output value
                temp_data = (numpy.clip(temp_data, min_value, max_value) - min_value)/(max_value - min_value) + 1
                out[use_mask] = max_output_value*numpy.log2(temp_data)
        return out

    def call(
            self,
            data: numpy.ndarray,
            min_value: Optional[float] = None,
            max_value: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method os directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable, as follows:

        >>> remap = Logarithmic()
        >>> discrete_data = remap(data, min_value=1.8, max_value=1.2e6)

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        min_value : None|float
            A minimum threshold, or pre-calculated data minimum, for consistent
            global use. The order of preference is the value provided here, the
            class `min_value` property value, then calculated from the present
            sample.
        max_value : None|float
            A maximum value threshold, or pre-calculated data maximum, for consistent
            global use. The order of preference is the value provided here, the
            class `max_value` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        return clip_cast(
            self.raw_call(data, min_value=min_value, max_value=max_value),
            dtype=self.output_dtype, min_value=0, max_value=self.max_output_value)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None) -> None:
        pixel_bounds = self._validate_pixel_bounds(reader, index, pixel_bounds)
        self.min_value, self.max_value = get_data_extrema(
            pixel_bounds, reader, index, 25*1024*1024, percentile=None)
class PEDF(MonochromaticRemap):
    """
    A monochromatic piecewise extended density format remap.
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_density')
    _name = 'pedf'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            dmin: Union[int, float] = 30,
            mmult: Union[int, float] = 40,
            eps: float = 1e-5,
            data_mean: Union[None, int, float] = None):
        """
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
            The maximum output value, bounded by the range of the output type.
        dmin : float|int
            A dynamic range parameter. Lower this widens the range, will raising it
            narrows the range. This was historically fixed at 30.
        mmult : float|int
            A contrast parameter. Low values will result is higher contrast and quicker
            saturation, while high values will decrease contrast and slower saturation.
            There is some balance between the competing effects in the `dmin` and `mmult`
            parameters.
        eps : float
            small offset to create a nominal floor when mapping data containing 0's.
        data_mean : None|float|int
            The global data mean (for continuity). The appropriate value will be
            calculated on a per calling array basis if not provided.
        """

        MonochromaticRemap.__init__(
            self, override_name=override_name, bit_depth=bit_depth, max_output_value=max_output_value)
        # the density computation is delegated to a contained Density instance
        self._density = Density(
            bit_depth=bit_depth, max_output_value=max_output_value,
            dmin=dmin, mmult=mmult, eps=eps, data_mean=data_mean)

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function
        set? In this case, this is the `data_mean` property.
        """

        return self._density.are_global_parameters_set

    def raw_call(
            self,
            data: numpy.ndarray,
            data_mean: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        data_mean : None|float
            The pre-calculated data mean, for consistent global use. The order
            of preference is the value provided here, the class data_mean property
            value, then the value calculated from the present sample.

        Returns
        -------
        numpy.ndarray
        """

        half_value = 0.5*self.max_output_value
        out = self._density.raw_call(data, data_mean=data_mean)
        # piecewise adjustment: density values above the midpoint are averaged
        # with the midpoint, compressing the top half of the dynamic range
        top_mask = (out > half_value)
        out[top_mask] = 0.5*(out[top_mask] + half_value)
        return out

    def call(
            self,
            data: numpy.ndarray,
            data_mean: Optional[float] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method os directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable, as follows:

        >>> remap = PEDF()
        >>> discrete_data = remap(data, data_mean=85.2)

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        data_mean : None|float
            The pre-calculated data mean, for consistent global use. The order
            of preference is the value provided here, the class data_mean property
            value, then the value calculated from the present sample.

        Returns
        -------
        numpy.ndarray
        """

        return clip_cast(
            self.raw_call(data, data_mean=data_mean),
            dtype=self.output_dtype, min_value=0, max_value=self.max_output_value)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None) -> None:
        # delegate to the contained Density instance, which stores the mean
        self._density.calculate_global_parameters_from_reader(
            reader, index=index, pixel_bounds=pixel_bounds)
class NRL(MonochromaticRemap):
    """
    A monochromatic remap which is linear for percentile of the data, then
    transitions to logarithmic.
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_knee', '_percentile', '_stats')
    _name = 'nrl'

    def __init__(
            self,
            override_name: Optional[str] = None,
            bit_depth: int = 8,
            max_output_value: Optional[int] = None,
            knee: Optional[int] = None,
            percentile: Union[int, float] = 99,
            stats: Optional[Tuple[float, float, float]] = None):
        """
        Parameters
        ----------
        override_name : None|str
            Override name for a specific class instance
        bit_depth : int
        max_output_value : None|int
        knee : int
            Where the knee for switching from linear to logarithmic occurs in the
            colormap regime - this should be in keeping with bit-depth.
        percentile : int|float
            In the event that we are calculating the stats, which percentile
            is the cut-off for lin-log switch-over?
        stats : None|tuple
            If provided, this should be of the form `(minimum, maximum, changeover)`.
        """

        self._knee = None
        self._percentile = None
        self._stats = None
        # NB: the parent constructor populates max_output_value, upon which
        # the knee validation below depends
        MonochromaticRemap.__init__(self, override_name=override_name, bit_depth=bit_depth, max_output_value=max_output_value)
        self._set_knee(knee)
        self._set_percentile(percentile)
        self._set_stats(stats)

    @property
    def knee(self) -> float:
        """
        float: The knee value where switching from linear to logarithmic occurs
        in the colormap regime
        """
        return self._knee

    def _set_knee(self, knee: Optional[float]):
        # default the knee at 80 percent of the output range
        max_value = self.max_output_value
        if knee is None:
            knee = 0.8*max_value

        knee = float(knee)

        if not (0 < knee < max_value):
            raise ValueError(
                'In keeping with bit-depth, knee must take a value strictly '
                'between 0 and {}'.format(max_value))
        self._knee = knee

    @property
    def percentile(self) -> float:
        """
        float: In the event that we are calculating the stats, which percentile
        is the cut-off for lin-log switch-over?
        """
        return self._percentile

    def _set_percentile(self, percentile: Union[None, int, float]):
        if percentile is None:
            percentile = 99.0
        else:
            percentile = float(percentile)

        if not (0 < percentile < 100):
            raise ValueError('percentile must fall strictly between 0 and 100')
        self._percentile = percentile

    @property
    def stats(self) -> Optional[Tuple[float, float, float]]:
        """
        None|tuple: If populated, this is a tuple of the form `(minimum, maximum, changeover)`.
        """
        return self._stats

    def _set_stats(self, value: Optional[Tuple[float, float, float]]):
        if value is None:
            self._stats = None
        else:
            self._stats = self._validate_stats(None, value)

    def _validate_stats(
            self,
            amplitude: Optional[numpy.ndarray],
            stats: Optional[Tuple[float, float, float]]) -> Optional[Tuple[float, float, float]]:
        # Preference order: explicitly provided stats, the class stats
        # property, then values calculated from the supplied amplitude sample.
        if stats is None:
            stats = self.stats

        if stats is None and amplitude is not None:
            stats = _nrl_stats(amplitude, self.percentile)

        if stats is not None:
            # normalize to floats, and require min <= changeover <= max
            min_value = float(stats[0])
            max_value = float(stats[1])
            changeover_value = float(stats[2])
            if not (min_value <= changeover_value <= max_value):
                raise ValueError('Got inconsistent stats value `{}`'.format(stats))
            stats = (min_value, max_value, changeover_value)
        return stats

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function
        set? In this case, this is the `stats` property.
        """
        return self._stats is not None

    def raw_call(
            self,
            data: numpy.ndarray,
            stats: Optional[Tuple[float, float, float]] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output floating point
        version, this is directly used by the :func:`call` method.

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        stats : None|tuple
            The stats `(minimum, maximum, changeover)`, for consistent
            global use. The order of preference is the value provided here, the
            class `stats` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        max_index = self.max_output_value
        amplitude = numpy.abs(data)
        amplitude_min, amplitude_max, changeover = self._validate_stats(amplitude, stats)

        out = numpy.empty(amplitude.shape, dtype='float64')
        if amplitude_min == amplitude_max:
            # constant input - every value maps to 0
            out[:] = 0
            return out

        # linearly map [amplitude_min, changeover] onto [0, knee]
        linear_region = (amplitude <= changeover)
        if changeover > amplitude_min:
            out[linear_region] = numpy.clip(
                self.knee*_linear_map(amplitude[linear_region], amplitude_min, changeover),
                0,
                max_index)
        else:
            logger.warning(
                'The remap array is at least significantly constant, the nrl remap may return '
                'strange results.')
            out[linear_region] = 0

        # logarithmically map (changeover, amplitude_max] onto (knee, max_index]
        if changeover == amplitude_max:
            out[~linear_region] = self.knee
        else:
            # calculate the log values
            extreme_data = numpy.clip(amplitude[~linear_region], changeover, amplitude_max)
            log_values = (extreme_data - changeover)/(amplitude_max - changeover) + 1
            # this is now linearly scaled from 1 to 2, apply log_2 and then scale appropriately
            out[~linear_region] = numpy.log2(log_values)*(max_index - self.knee) + self.knee
        return out

    def call(
            self,
            data: numpy.ndarray,
            stats: Optional[Tuple[float, float, float]] = None) -> numpy.ndarray:
        """
        This performs the mapping from input data to output discrete version.

        This method is directly called by the :func:`__call__` method, so the
        class instance (once constructed) is itself callable as follows:

        >>> remap = NRL()
        >>> discrete_data = remap(data, stats=(2.3, 1025.0, 997.2))

        Parameters
        ----------
        data : numpy.ndarray
            The (presumably) complex data to remap.
        stats : None|tuple
            The stats `(minimum, maximum, changeover)`, for consistent
            global use. The order of preference is the value provided here, the
            class `stats` property value, then calculated from the present
            sample.

        Returns
        -------
        numpy.ndarray
        """

        return clip_cast(
            self.raw_call(data, stats=stats),
            dtype=self.output_dtype, min_value=0, max_value=self.max_output_value)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None) -> None:
        # populate the stats global parameter from the reader contents,
        # sampling at most 25 MB of pixel data
        pixel_bounds = self._validate_pixel_bounds(reader, index, pixel_bounds)
        self._set_stats(
            get_data_extrema(
                pixel_bounds, reader, index, 25*1024*1024, percentile=self.percentile))
class LUT8bit(RemapFunction):
    """
    A remap which uses a monochromatic remap function and an 8-bit lookup table
    to produce a (color) image output
    """

    __slots__ = ('_override_name', '_bit_depth', '_dimension', '_mono_remap', '_lookup_table')
    _name = '_lut_8bit'
    _allowed_dimension = None

    def __init__(
            self,
            mono_remap: MonochromaticRemap,
            lookup_table: Union[str, numpy.ndarray],
            override_name: Optional[str] = None,
            use_alpha: bool = False):
        """
        Parameters
        ----------
        mono_remap : MonochromaticRemap
            The remap to apply before using the lookup table. Note that the `max_output_value`
            and lookup_table first dimension size are required to be the same.
        lookup_table : str|numpy.ndarray
            A string name for a registered matplotlib colormap or the 256 element
            rgb or rgba array.
        override_name : None|str
            Override name for a specific class instance. If this is not provided and
            the `lookup_table` will be constructed from a matplotlib colormap name,
            then that name will be used.
        use_alpha : bool
            Only used if `mono_remap` is the name of a matplotlib colormap, this
            specifies whether or not to use the alpha channel.
        """

        self._mono_remap = None
        self._lookup_table = None
        # default the instance name to the colormap name, when one was given
        if override_name is None and isinstance(lookup_table, str):
            override_name = lookup_table
        RemapFunction.__init__(
            self,
            override_name=override_name,
            bit_depth=8,
            dimension=0)
        # NB: dimension will be determined by the lookup table
        self._set_mono_remap(mono_remap)
        self._set_lookup_table(lookup_table, use_alpha)

    def _set_dimension(self, value: int):
        """
        The property is intended to be read-only.

        Parameters
        ----------
        value : int
        """

        self._dimension = value

    @property
    def mono_remap(self) -> MonochromaticRemap:
        """
        MonochromaticRemap: The monochromatic remap being used.
        """
        return self._mono_remap

    def _set_mono_remap(self, value: MonochromaticRemap):
        if not isinstance(value, MonochromaticRemap):
            raise ValueError('mono_remap requires a monochromatic remap instance')
        self._mono_remap = value

    @property
    def lookup_table(self) -> numpy.ndarray:
        """
        numpy.ndarray: The 8-bit lookup table.
        """
        return self._lookup_table

    def _set_lookup_table(
            self,
            value: Union[str, numpy.ndarray],
            use_alpha: bool) -> None:
        # Resolve a matplotlib colormap name to a concrete uint8 rgb(a) array,
        # then validate the array shape against the mono_remap output range.
        max_out_size = self.mono_remap.max_output_value
        if isinstance(value, str):
            if cm is None:
                raise ImportError(
                    'The lookup_table has been specified by providing a matplotlib '
                    'colormap name, but matplotlib can not be imported.')
            # NOTE(review): cm.get_cmap is deprecated in matplotlib >= 3.7
            # (use matplotlib.colormaps instead) - confirm supported versions
            cmap = cm.get_cmap(value, max_out_size+1)

            color_array = cmap(numpy.arange(max_out_size+1))
            value = clip_cast(max_out_size*color_array, dtype='uint8')
            if value.shape[1] > 3 and not use_alpha:
                # drop the alpha channel
                value = value[:, :3]

        if not (isinstance(value, numpy.ndarray) and value.ndim == 2 and value.dtype.name == 'uint8'):
            raise ValueError(
                'lookup_table requires a two-dimensional numpy array of dtype = uint8')
        if value.shape[0] != max_out_size+1:
            raise ValueError(
                'lookup_table size (first dimension) must agree with mono_remap.max_output_value')
        self._lookup_table = value
        # the output dimension is the number of color channels
        self._dimension = value.shape[1]

    @property
    def are_global_parameters_set(self) -> bool:
        """
        bool: Are (all) global parameters used for applying this remap function set?
        """
        return self.mono_remap.are_global_parameters_set

    def raw_call(
            self,
            data: numpy.ndarray,
            **kwargs) -> numpy.ndarray:
        """
        Contrary to monochromatic remaps, this is identical to :func:`call`.

        Parameters
        ----------
        data : numpy.ndarray
        kwargs
            The keyword arguments passed through to mono_remap.

        Returns
        -------
        numpy.ndarray
        """

        # the mono remap output indexes directly into the lookup table
        return self._lookup_table[self._mono_remap(data, **kwargs)]

    def call(
            self,
            data: numpy.ndarray,
            **kwargs) -> numpy.ndarray:
        return self.raw_call(data, **kwargs)

    def calculate_global_parameters_from_reader(
            self,
            reader: SICDTypeReader,
            index: int = 0,
            pixel_bounds: Union[None, tuple, list, numpy.ndarray] = None) -> None:
        # delegate entirely to the underlying monochromatic remap
        self.mono_remap.calculate_global_parameters_from_reader(
            reader, index=index, pixel_bounds=pixel_bounds)
###########
# registration function for maintaining the list
def register_remap(
        remap_function: Union[RemapFunction, Type],
        overwrite: bool = False) -> None:
    """
    Register a remap function for general usage.

    Parameters
    ----------
    remap_function : RemapFunction|Type
        A RemapFunction instance, or a RemapFunction subclass which will be
        instantiated with default arguments.
    overwrite : bool
        Should we overwrite any currently existing remap of the given name?

    Returns
    -------
    None
    """

    # instantiate, if a RemapFunction subclass (rather than instance) was supplied
    if isinstance(remap_function, type) and issubclass(remap_function, RemapFunction):
        remap_function = remap_function()
    if not isinstance(remap_function, RemapFunction):
        raise TypeError('remap_function must be an instance of RemapFunction.')

    the_name = remap_function.name
    if the_name not in _REMAP_DICT:
        _REMAP_DICT[the_name] = remap_function
        return
    if overwrite:
        logger.info('Overwriting the remap {}'.format(the_name))
        _REMAP_DICT[the_name] = remap_function
    else:
        logger.info('Remap {} already exists and is not being replaced'.format(the_name))
def _register_defaults():
    """
    Register the default remap instances, idempotently.

    The eight standard monochromatic remaps are always registered; four
    colormap-based remaps are registered only when matplotlib is available
    and recognizes the colormap name.
    """

    global _DEFAULTS_REGISTERED
    if _DEFAULTS_REGISTERED:
        return

    for remap_class in (NRL, Density, High_Contrast, Brighter, Darker, Linear, Logarithmic, PEDF):
        register_remap(remap_class(bit_depth=8), overwrite=False)

    if cm is not None:
        for cmap_name in ('viridis', 'magma', 'rainbow', 'bone'):
            try:
                register_remap(
                    LUT8bit(NRL(bit_depth=8), cmap_name, use_alpha=False), overwrite=False)
            except (KeyError, ValueError):
                # the colormap name is not recognized by this matplotlib version
                # (cm.get_cmap raises ValueError, matplotlib.colormaps raises KeyError)
                pass
    _DEFAULTS_REGISTERED = True
def get_remap_names() -> List[str]:
    """
    Gets a list of currently registered remap function names.

    Returns
    -------
    List[str]
    """

    if not _DEFAULTS_REGISTERED:
        _register_defaults()
    return [entry for entry in _REMAP_DICT]
def get_remap_list() -> List[Tuple[str, RemapFunction]]:
    """
    Gets a list of currently registered remaps.

    Returns
    -------
    List[Tuple[str, RemapFunction]]
        List of tuples of the form `(<name>, <RemapFunction instance>)`.
    """

    if not _DEFAULTS_REGISTERED:
        _register_defaults()
    # NB: this was originally implemented via inspection of the callable members
    # of this module, but that ends up requiring more care in excluding
    # undesirable elements than this method
    return list(_REMAP_DICT.items())
def get_registered_remap(
        remap_name: str,
        default: Optional[RemapFunction] = None) -> RemapFunction:
    """
    Gets a remap function from its registered name.

    Parameters
    ----------
    remap_name : str
    default : None|RemapFunction
        Returned in place of raising, when the name is unregistered.

    Returns
    -------
    RemapFunction

    Raises
    ------
    KeyError
    """

    if not _DEFAULTS_REGISTERED:
        _register_defaults()

    entry = _REMAP_DICT.get(remap_name, None)
    if entry is not None:
        return entry
    if default is not None:
        return default
    raise KeyError('Unregistered remap name `{}`'.format(remap_name))
#################
# DEPRECATED!
#################
# the original flat methods, maintained for a while
# for backwards compatibility
def density(data, data_mean=None):
    """
    Standard set of parameters for density remap.

    .. deprecated:: use the registered :class:`Density` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
        The data to remap.
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the density() method is deprecated,\n\t'
        'use the Density class, which is also callable', DeprecationWarning)
    return get_registered_remap('density')(data, data_mean=data_mean)
def brighter(data, data_mean=None):
    """
    Brighter set of parameters for density remap.

    .. deprecated:: use the registered :class:`Brighter` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the brighter() method is deprecated,\n\t'
        'use the Brighter class, which is also callable', DeprecationWarning)
    return get_registered_remap('brighter')(data, data_mean=data_mean)
def darker(data, data_mean=None):
    """
    Darker set of parameters for density remap.

    .. deprecated:: use the registered :class:`Darker` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the darker() method is deprecated,\n\t'
        'use the Darker class, which is also callable', DeprecationWarning)
    return get_registered_remap('darker')(data, data_mean=data_mean)
def high_contrast(data, data_mean=None):
    """
    Increased contrast set of parameters for density remap.

    .. deprecated:: use the registered :class:`High_Contrast` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the high_contrast() method is deprecated,\n\t'
        'use the HighContrast class, which is also callable', DeprecationWarning)
    return get_registered_remap('high_contrast')(data, data_mean=data_mean)
def linear(data, min_value=None, max_value=None):
    """
    Linear remap - just the magnitude.

    .. deprecated:: use the registered :class:`Linear` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
    min_value : None|float
        The minimum allowed value for the dynamic range.
    max_value : None|float
        The maximum allowed value for the dynamic range.

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the linear() method is deprecated,\n\t'
        'use the Linear class, which is also callable', DeprecationWarning)
    return get_registered_remap('linear')(data, min_value=min_value, max_value=max_value)
def log(data, min_value=None, max_value=None):
    """
    Logarithmic remap.

    .. deprecated:: use the registered :class:`Logarithmic` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
    min_value : None|float
        The minimum allowed value for the dynamic range.
    max_value : None|float
        The maximum allowed value for the dynamic range.

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the log() method is deprecated,\n\t'
        'use the Logarithmic class, which is also callable', DeprecationWarning)
    return get_registered_remap('log')(data, min_value=min_value, max_value=max_value)
def pedf(data, data_mean=None):
    """
    Piecewise extended density format remap.

    .. deprecated:: use the registered :class:`PEDF` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
        The array to be remapped.
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the pedf() method is deprecated,\n\t'
        'use the PEDF class, which is also callable', DeprecationWarning)
    return get_registered_remap('pedf')(data, data_mean=data_mean)
def nrl(data, stats=None):
    """
    A lin-log style remap.

    .. deprecated:: use the registered :class:`NRL` instance instead.

    Parameters
    ----------
    data : numpy.ndarray
        The data array to remap
    stats : None|tuple
        This is calculated if not provided. Expected to be of the form
        `(minimum, maximum, 99th percentile)`.

    Returns
    -------
    numpy.ndarray
    """

    warnings.warn(
        'the nrl() method is deprecated,\n\t'
        'use the NRL class, which is also callable', DeprecationWarning)
    return get_registered_remap('nrl')(data, stats=stats)
| 61,734 | 30.904393 | 126 | py |
sarpy | sarpy-master/sarpy/utils/create_kmz.py | """
Create kmz products based on CPHD or SICD.
For a basic help on the command-line, check
>>> python -m sarpy.utils.create_kmz --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import argparse
import logging
import os
import sarpy
from sarpy.io.complex.converter import open_complex
import sarpy.io.general.base
import sarpy.io.phase_history
from sarpy.visualization.kmz_product_creation import create_kmz_view
from sarpy.visualization.cphd_kmz_product_creation import cphd_create_kmz_view
if __name__ == '__main__':
    # command-line driver: parse arguments, configure logging, then attempt
    # CPHD-based kmz creation and fall back to complex-image kmz creation
    parser = argparse.ArgumentParser(
        description="Create KMZ product from a CPHD or Complex Image.",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file', metavar='input_file',
        help='Path input data file, or directory for radarsat, RCM, or sentinel.\n'
             '* For radarsat or RCM, this can be the product.xml file, or parent directory\n'
             ' of product.xml or metadata/product.xml.\n'
             '* For sentinel, this can be the manifest.safe file, or parent directory of\n'
             ' manifest.safe.\n')
    parser.add_argument(
        'output_directory', metavar='output_directory',
        help='Path to the output directory where the product file(s) will be created.\n'
             'This directory MUST exist.\n'
             '* Depending on the input file, multiple product files may be produced.\n'
             '* The name for the output file(s) will be chosen based on CoreName and\n '
             ' transmit/collect polarization.\n')
    parser.add_argument(
        '-s', '--size', default=3072, type=int, help='Maximum size for the interpolated image, put -1 for full size')
    parser.add_argument(
        '-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
    args = parser.parse_args()

    level = 'INFO' if args.verbose else 'WARNING'
    logging.basicConfig(level=level)
    logger = logging.getLogger('sarpy')
    logger.setLevel(level)

    # derive the kmz file stem from the input file name
    file_stem = 'View-' + os.path.splitext(os.path.split(args.input_file)[1])[0]
    try:
        # first try to treat the input as phase history (CPHD) data
        reader = sarpy.io.phase_history.open(args.input_file)
        cphd_create_kmz_view(reader, args.output_directory, file_stem=file_stem)
    except sarpy.io.general.base.SarpyIOError:
        # not CPHD - treat it as complex image data instead
        reader = open_complex(args.input_file)
        pixel_limit = None if args.size == -1 else args.size
        create_kmz_view(reader, args.output_directory, pixel_limit=pixel_limit, file_stem=file_stem)
| 2,529 | 39.15873 | 117 | py |
sarpy | sarpy-master/sarpy/utils/nitf_utils.py | """
A utility for dumping a NITF header to the console. Contributed by Austin Lan of L3/Harris.
To dump NITF header information to a text file from the command-line
>>> python -m sarpy.utils.nitf_utils <path to nitf file>
For a basic help on the command-line, check
>>> python -m sarpy.utils.nitf_utils --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Austin Lan, L3/Harris"
import argparse
import functools
import sys
from xml.dom import minidom
import os
from typing import Union, BinaryIO, TextIO, List, Dict
from io import StringIO
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.base import NITFElement, TRE, TREList, UserHeaderType
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, DataExtensionHeader0, \
DESUserHeader
from sarpy.io.general.nitf_elements.graphics import GraphicsSegmentHeader
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0, MaskSubheader
from sarpy.io.general.nitf_elements.label import LabelSegmentHeader
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0
from sarpy.io.general.nitf_elements.res import ReservedExtensionHeader, ReservedExtensionHeader0, \
RESUserHeader
from sarpy.io.general.nitf_elements.symbol import SymbolSegmentHeader
from sarpy.io.general.nitf_elements.text import TextSegmentHeader, TextSegmentHeader0
from sarpy.io.general.nitf_elements.tres.tre_elements import TREElement
# Custom print function
# module-level output hook: print_nitf() rebinds this (via functools.partial)
# so every helper below writes to the chosen destination stream
print_func = print
############
# helper methods
def _filter_files(input_path):
"""
Determine if a given input path corresponds to a NITF 2.1 or 2.0 file.
Parameters
----------
input_path : str
Returns
-------
bool
"""
if not os.path.isfile(input_path):
return False
_, fext = os.path.splitext(input_path)
with open(input_path, 'rb') as fi:
check = fi.read(9)
return check in [b'NITF02.10', b'NITF02.00']
def _create_default_output_file(input_file, output_directory=None):
if not isinstance(input_file, str):
if output_directory is None:
return os.path.expanduser('~/Desktop/header_dump.txt')
else:
return os.path.join(output_directory, 'header_dump.txt')
if output_directory is None:
return os.path.splitext(input_file)[0] + '.header_dump.txt'
else:
return os.path.join(output_directory, os.path.splitext(os.path.split(input_file)[1])[0] + '.header_dump.txt')
def _decode_effort(value):
# type: (bytes) -> Union[bytes, str]
# noinspection PyBroadException
try:
return value.decode()
except Exception:
return value
############
# printing methods
def _print_element_field(elem, field, prefix=''):
# type: (Union[None, NITFElement], Union[None, str], str) -> None
if elem is None or field is None:
return
value = getattr(elem, field, None)
if value is None:
value = ''
print_func('{}{} = {}'.format(prefix, field, value))
def _print_element(elem, prefix=''):
# type: (Union[None, NITFElement], str) -> None
if elem is None:
return
# noinspection PyProtectedMember
for field in elem._ordering:
_print_element_field(elem, field, prefix=prefix)
def _print_element_list(elem_list, prefix=''):
# type: (Union[None, List[NITFElement]], str) -> None
if elem_list is None:
return
for i, elem in enumerate(elem_list):
_print_element(elem, prefix='{}[{}].'.format(prefix, i))
def _print_tre_element(field, value, prefix=''):
# type: (Union[None, str], Union[str, int, bytes], str) -> None
if field is None:
return
if value is None:
value = ''
print_func('{}{} = {}'.format(prefix, field, value))
def _print_tre_list(elem_list, prefix=''):
# type: (Union[None, List, TREList], str) -> None
if elem_list is None:
return
for i, elem in enumerate(elem_list):
_print_tre_dict(elem, '{}[{}].'.format(prefix, i))
def _print_tre_dict(elem_dict, prefix=''):
# type: (Union[None, Dict], str) -> None
if elem_dict is None:
return
for field, value in elem_dict.items():
if isinstance(value, list):
_print_tre_list(value, '{}{}'.format(prefix, field))
else:
_print_tre_element(field, value, prefix)
def _print_tres(tres):
# type: (Union[TREList, List[TRE]]) -> None
for tre in tres:
print_func('')
if isinstance(tre.DATA, TREElement):
_print_tre_dict(tre.DATA.to_dict(), prefix='{}.'.format(tre.TAG))
else:
# Unknown TRE
_print_tre_element('DATA', _decode_effort(tre.DATA), prefix='{}.'.format(tre.TAG))
def _print_file_header(hdr):
    # type: (Union[NITFHeader, NITFHeader0]) -> None
    """Print the NITF file header; segment collections are skipped here and
    printed as their own sections by the caller."""

    segment_fields = {
        'ImageSegments', 'GraphicsSegments', 'SymbolSegments', 'LabelSegments',
        'TextSegments', 'DataExtensions', 'ReservedExtensions'}
    # noinspection PyProtectedMember
    for field in hdr._ordering:
        if field == 'Security':
            _print_element(getattr(hdr, field, None), prefix='FS')
        elif field == 'FBKGC':
            # background color triple
            value = getattr(hdr, field, None)
            print_func('FBKGC = {} {} {}'.format(value[0], value[1], value[2]))
        elif field in segment_fields:
            continue
        elif field in ('UserHeader', 'ExtendedHeader'):
            value = getattr(hdr, field, None)
            assert isinstance(value, UserHeaderType)
            if value and value.data and value.data.tres:
                _print_tres(value.data.tres)
        else:
            _print_element_field(hdr, field)
def _print_mask_header(hdr):
# type: (Union[None, MaskSubheader]) -> None
if hdr is None:
return
print_func('----- Mask Subheader (part of image data segment) -----')
# noinspection PyProtectedMember
for field in hdr._ordering:
if field in ['BMR', 'TMR']:
value = getattr(hdr, field, None)
if value is None:
continue
else:
for the_band, subarray in enumerate(value):
print_func('{}BND{} = {}'.format(field, the_band, subarray))
else:
_print_element_field(hdr, field, prefix='')
def _print_image_header(hdr):
    # type: (Union[ImageSegmentHeader, ImageSegmentHeader0]) -> None
    """Print an image segment subheader, including comments, bands, TREs,
    and any trailing mask subheader."""

    # noinspection PyProtectedMember
    for field in hdr._ordering:
        if field == 'Security':
            _print_element(getattr(hdr, field, None), prefix='IS')
        elif field in ('Comments', 'Bands'):
            _print_element_list(getattr(hdr, field, None), prefix='{}'.format(field))
        elif field in ('UserHeader', 'ExtendedHeader'):
            value = getattr(hdr, field, None)
            assert isinstance(value, UserHeaderType)
            if value and value.data and value.data.tres:
                _print_tres(value.data.tres)
        else:
            _print_element_field(hdr, field)
    _print_mask_header(hdr.mask_subheader)
def _print_basic_header(hdr, prefix):
    """Print a simple segment subheader: security fields carry `prefix`,
    user/extended headers expand their TREs."""

    # noinspection PyProtectedMember
    for field in hdr._ordering:
        if field == 'Security':
            _print_element(getattr(hdr, field, None), prefix=prefix)
            continue
        if field in ('UserHeader', 'ExtendedHeader'):
            value = getattr(hdr, field, None)
            assert isinstance(value, UserHeaderType)
            if value and value.data and value.data.tres:
                _print_tres(value.data.tres)
            continue
        _print_element_field(hdr, field)
def _print_graphics_header(hdr):
    # type: (GraphicsSegmentHeader) -> None
    """Print a graphics segment subheader (security prefix 'SS')."""
    _print_basic_header(hdr, 'SS')
def _print_symbol_header(hdr):
    # type: (SymbolSegmentHeader) -> None
    """Print a symbol segment subheader (security prefix 'SS')."""
    _print_basic_header(hdr, 'SS')
def _print_label_header(hdr):
    # type: (LabelSegmentHeader) -> None
    """Print a label segment subheader (security prefix 'LS')."""
    _print_basic_header(hdr, 'LS')
def _print_text_header(hdr):
    # type: (Union[TextSegmentHeader, TextSegmentHeader0]) -> None
    """Print a text segment subheader (security prefix 'TS')."""
    _print_basic_header(hdr, 'TS')
def _print_extension_header(hdr, prefix):
    """Print a DES/RES subheader, handling the user-defined subheader portion."""

    # noinspection PyProtectedMember
    for field in hdr._ordering:
        if field == 'Security':
            _print_element(getattr(hdr, field, None), prefix=prefix)
            continue
        if field not in ('UserHeader', 'ExtendedHeader'):
            _print_element_field(hdr, field)
            continue
        value = getattr(hdr, field, None)
        if not isinstance(value, (DESUserHeader, RESUserHeader)):
            # e.g., XMLDESSubheader
            _print_element(value, prefix='{}SHF.'.format(prefix))
        elif value.data:
            # Unknown user-defined subheader
            print_func('{}SHF = {}'.format(prefix, _decode_effort(value.data)))
def _print_des_header(hdr):
    # type: (Union[DataExtensionHeader, DataExtensionHeader0]) -> None
    """Print a data extension segment subheader (prefix 'DES')."""
    _print_extension_header(hdr, 'DES')
def _print_res_header(hdr):
    # type: (Union[ReservedExtensionHeader, ReservedExtensionHeader0]) -> None
    """Print a reserved extension segment subheader (prefix 'RES')."""
    _print_extension_header(hdr, 'RES')
def print_nitf(file_name, dest=sys.stdout):
    """
    Worker function to dump the NITF header and various subheader details to the
    provided destination.

    Parameters
    ----------
    file_name : str|BinaryIO
    dest : TextIO
    """

    # Configure print function for desired destination
    #    - e.g., stdout, string buffer, file
    global print_func
    print_func = functools.partial(print, file=dest)

    details = NITFDetails(file_name)

    if isinstance(file_name, str):
        print_func('')
        print_func('Details for file {}'.format(file_name))
        print_func('')

    print_func('----- File Header -----')
    _print_file_header(details.nitf_header)
    print_func('')

    # each segment type below follows the same pattern: print the parsed
    # subheader, then (where applicable) the raw segment data
    if details.img_subheader_offsets is not None:
        for img_subhead_num in range(details.img_subheader_offsets.size):
            print_func('----- Image {} -----'.format(img_subhead_num))
            hdr = details.parse_image_subheader(img_subhead_num)
            _print_image_header(hdr)
            print_func('')

    if details.graphics_subheader_offsets is not None:
        for graphics_subhead_num in range(details.graphics_subheader_offsets.size):
            print_func('----- Graphic {} -----'.format(graphics_subhead_num))
            hdr = details.parse_graphics_subheader(graphics_subhead_num)
            _print_graphics_header(hdr)
            data = details.get_graphics_bytes(graphics_subhead_num)
            print_func('GSDATA = {}'.format(_decode_effort(data)))
            print_func('')

    if details.symbol_subheader_offsets is not None:
        for symbol_subhead_num in range(details.symbol_subheader_offsets.size):
            print_func('----- Symbol {} -----'.format(symbol_subhead_num))
            hdr = details.parse_symbol_subheader(symbol_subhead_num)
            _print_symbol_header(hdr)
            data = details.get_symbol_bytes(symbol_subhead_num)
            print_func('SSDATA = {}'.format(_decode_effort(data)))
            print_func('')

    if details.label_subheader_offsets is not None:
        for label_subhead_num in range(details.label_subheader_offsets.size):
            print_func('----- Label {} -----'.format(label_subhead_num))
            hdr = details.parse_label_subheader(label_subhead_num)
            _print_label_header(hdr)
            data = details.get_label_bytes(label_subhead_num)
            print_func('LSDATA = {}'.format(_decode_effort(data)))
            print_func('')

    if details.text_subheader_offsets is not None:
        for text_subhead_num in range(details.text_subheader_offsets.size):
            print_func('----- Text {} -----'.format(text_subhead_num))
            hdr = details.parse_text_subheader(text_subhead_num)
            _print_text_header(hdr)
            data = details.get_text_bytes(text_subhead_num)
            print_func('TSDATA = {}'.format(_decode_effort(data)))
            print_func('')

    if details.des_subheader_offsets is not None:
        for des_subhead_num in range(details.des_subheader_offsets.size):
            print_func('----- DES {} -----'.format(des_subhead_num))
            hdr = details.parse_des_subheader(des_subhead_num)
            _print_des_header(hdr)
            data = details.get_des_bytes(des_subhead_num)

            # the DES identifier field name differs between NITF versions
            des_id = hdr.DESID if details.nitf_version == '02.10' else hdr.DESTAG

            if des_id.strip() in ['XML_DATA_CONTENT', 'SICD_XML', 'SIDD_XML']:
                # pretty-print the embedded XML content
                xml_str = minidom.parseString(
                    data.decode()).toprettyxml(indent='    ', newl='\n')
                # NB: this may or not exhibit platform dependent choices in which codec (i.e. latin-1 versus utf-8)
                print_func('DESDATA =')
                for line_num, xml_entry in enumerate(xml_str.splitlines()):
                    if line_num == 0:
                        # Remove xml that gets inserted by minidom, if it's not actually there
                        if (not data.startswith(b'<?xml version')) and xml_entry.startswith('<?xml version'):
                            continue
                        print_func(xml_entry)
                    elif xml_entry.strip() != '':
                        # Remove extra new lines if XML is already formatted
                        print_func(xml_entry)
            elif des_id.strip() in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
                # overflow TREs that did not fit in the file/segment headers
                tres = TREList.from_bytes(data, 0)
                print_func('DESDATA = ')
                _print_tres(tres)
            else:
                # Unknown user-defined data
                print_func('DESDATA = {}'.format(_decode_effort(data)))
            print_func('')

    if details.res_subheader_offsets is not None:
        for res_subhead_num in range(details.res_subheader_offsets.size):
            print_func('----- RES {} -----'.format(res_subhead_num))
            hdr = details.parse_res_subheader(res_subhead_num)
            _print_res_header(hdr)
            data = details.get_res_bytes(res_subhead_num)
            print_func('RESDATA = {}'.format(_decode_effort(data)))
            print_func('')
##########
# method for dumping file using the print method(s)
def dump_nitf_file(file_name, dest, over_write=True):
    """
    Utility to dump the NITF header and various subheader details to a configurable
    destination.

    Parameters
    ----------
    file_name : str|BinaryIO
        The path to or file-like object containing a NITF 2.1 or 2.0 file.
    dest : str
        'stdout', 'string', 'default' (will use `file_name+'.header_dump.txt'`),
        or the path to an output file.
    over_write : bool
        If `True`, then overwrite the destination file, otherwise append to the
        file.

    Returns
    -------
    None|str
        There is only a return value if `dest=='string'`.
    """

    if dest == 'stdout':
        print_nitf(file_name, dest=sys.stdout)
        return None
    if dest == 'string':
        # render into an in-memory buffer and hand back the text
        buffer = StringIO()
        print_nitf(file_name, dest=buffer)
        contents = buffer.getvalue()
        buffer.close()  # free the buffer
        return contents

    out_path = _create_default_output_file(file_name) if dest == 'default' else dest
    # append only when the file already exists and overwriting is disabled
    mode = 'w' if (over_write or not os.path.exists(out_path)) else 'a'
    with open(out_path, mode) as the_file:
        print_nitf(file_name, dest=the_file)
    return None
if __name__ == '__main__':
    # Command-line entry point: dump NITF header details for a single file,
    # or for every NITF file discovered in a directory.
    parser = argparse.ArgumentParser(
        description='Utility to dump NITF 2.1 or 2.0 headers.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file',
        help='The path to a nitf file, or directory to search for NITF files.')
    parser.add_argument(
        '-o', '--output', default='default',
        help="'default', 'stdout', or the path for an output file.\n"
             "* 'default', the output will be at '<input path>.header_dump.txt' \n"
             "  This will be overwritten, if it exists.\n"
             "* 'stdout' will print the information to standard out.\n"
             "* Otherwise, "
             "  if `input_file` is a directory, this is expected to be the path to\n"
             "  an output directory for the output following the default naming scheme.\n"
             "* if `input_file` a file path, this is expected to be the path to a file \n"
             "  and output will be written there.\n"
             "  In either case, existing output files will be overwritten.")
    args = parser.parse_args()
    if os.path.isdir(args.input_file):
        # directory input - process every entry that passes the NITF file filter
        entries = [os.path.join(args.input_file, part) for part in os.listdir(args.input_file)]
        for file_number, entry in enumerate(filter(_filter_files, entries)):
            if args.output == 'stdout':
                output = args.output
            elif args.output == 'default':
                # dump next to the input file itself
                output = _create_default_output_file(entry, output_directory=None)
            else:
                # an explicit output must then be a directory to receive the dumps
                if not os.path.isdir(args.output):
                    raise IOError(
                        'Provided input is a directory, so provided output must '
                        'be a directory, `stdout`, or `default`.')
                output = _create_default_output_file(entry, output_directory=args.output)
            dump_nitf_file(entry, output)
    else:
        # single file input - destination string interpreted by dump_nitf_file
        dump_nitf_file(args.input_file, args.output)
| 17,560 | 35.283058 | 117 | py |
sarpy | sarpy-master/sarpy/utils/create_product.py | """
Create products based on SICD type reader.
For a basic help on the command-line, check
>>> python -m sarpy.utils.create_product --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import argparse
import logging
import sarpy
from sarpy.io.complex.converter import open_complex
from sarpy.processing.ortho_rectify import BivariateSplineMethod, NearestNeighborMethod
from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, \
create_csi_sidd, create_dynamic_image_sidd
def _parse_method(method):
if method.startswith('spline_'):
return int(method[-1])
else:
return 'nearest'
if __name__ == '__main__':
    # Command-line entry point: create SIDD product(s) from a SICD type file.
    parser = argparse.ArgumentParser(
        # fixed typo in user-facing description: "is SIDD format" -> "in SIDD format"
        description="Create derived product in SIDD format from a SICD type file.",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file', metavar='input_file',
        help='Path input data file, or directory for radarsat, RCM, or sentinel.\n'
             '* For radarsat or RCM, this can be the product.xml file, or parent directory\n'
             '  of product.xml or metadata/product.xml.\n'
             '* For sentinel, this can be the manifest.safe file, or parent directory of\n'
             '  manifest.safe.\n')
    parser.add_argument(
        'output_directory', metavar='output_directory',
        help='Path to the output directory where the product file(s) will be created.\n'
             'This directory MUST exist.\n'
             '* Depending on the input file, multiple product files may be produced.\n'
             '* The name for the output file(s) will be chosen based on CoreName and\n '
             '  transmit/collect polarization.\n')
    parser.add_argument(
        '-t', '--type', default='detected', choices=['detected', 'csi', 'dynamic'],
        help="The type of derived product.")
    parser.add_argument(
        '-m', '--method', default='nearest', choices=['nearest', ]+['spline_{}'.format(i) for i in range(1, 6)],
        help="The interpolation method.")
    parser.add_argument(
        '--version', default=2, type=int, choices=[1, 2],
        help="The version of the SIDD standard used.")
    parser.add_argument(
        '-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
    parser.add_argument(
        '-s', '--sicd', action='store_true', help='Include the SICD structure in the SIDD?')
    args = parser.parse_args()
    level = 'INFO' if args.verbose else 'WARNING'
    logging.basicConfig(level=level)
    logger = logging.getLogger('sarpy')
    logger.setLevel(level)
    reader = open_complex(args.input_file)
    # 'spline_<n>' maps to an integer spline degree; anything else means nearest neighbor
    degree = _parse_method(args.method)
    for i, sicd in enumerate(reader.get_sicds_as_tuple()):
        if isinstance(degree, int):
            ortho_helper = BivariateSplineMethod(reader, index=i, row_order=degree, col_order=degree)
        else:
            ortho_helper = NearestNeighborMethod(reader, index=i)
        if args.type == 'detected':
            create_detected_image_sidd(ortho_helper, args.output_directory, version=args.version, include_sicd=args.sicd)
        elif args.type == 'csi':
            create_csi_sidd(ortho_helper, args.output_directory, version=args.version, include_sicd=args.sicd)
        elif args.type == 'dynamic':
            create_dynamic_image_sidd(ortho_helper, args.output_directory, version=args.version, include_sicd=args.sicd)
        else:
            raise ValueError('Got unhandled type {}'.format(args.type))
| 3,545 | 41.214286 | 121 | py |
sarpy | sarpy-master/sarpy/utils/nominal_sicd_noise.py | """
Add a nominal noise polynomial to a sicd.
For a basic help on the command-line, check
>>> python -m sarpy.utils.nominal_sicd_noise --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
import argparse
import numpy
import sarpy
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.complex.converter import conversion_utility
from sarpy.io.complex.sicd_elements.Radiometric import NoiseLevelType_
logger = logging.getLogger(__name__)
def nominal_sicd_noise(
        input_reader, out_directory, output_file=None, noise_db_value=-16.0, override=False,
        check_existence=True, check_older_version=False, preserve_nitf_information=False):
    """
    Create a sicd with the nominal noise value.

    Parameters
    ----------
    input_reader : str|SICDReader
    out_directory : str
        The directory of the output.
    output_file : None|str
        If `None`, then the name will mirror the original with noise details appended.
    noise_db_value : int|float
        The estimate for nesz in decibels.
    override : bool
        If a NoisePoly is already populated, should we override the value? If `False`
        and a NoisePoly is already populated, then an exception will be raised.
    check_existence : bool
        Check for the existence of the file before overwriting?
    check_older_version : bool
        Try to use a less recent version of SICD (1.1), for possible application
        compliance issues?
    preserve_nitf_information : bool
        Try to preserve some of the original NITF information?
    """

    if isinstance(input_reader, str):
        input_reader = SICDReader(input_reader)
    if not isinstance(input_reader, SICDReader):
        raise TypeError('We require that the input is a SICD reader or path to a sicd file.')

    noise_db_value = float(noise_db_value)
    if noise_db_value > -4:
        # values this large are very likely linear scale rather than dB
        logger.warning(
            'The noise estimate should be provided in dB,\n\t'
            'and the provided value is `{}`.\n\t'
            'Maybe this is an error?'.format(noise_db_value))

    if output_file is None:
        # mirror the original file name, with the noise level appended
        fname = os.path.split(input_reader.file_name)[1]
        fstem, fext = os.path.splitext(fname)
        fstem += '_{}dB_noise'.format(int(noise_db_value))
        fname = fstem + fext
    else:
        fname = os.path.split(output_file)[1]

    sicd = input_reader.sicd_meta
    if sicd.Radiometric is None:
        raise ValueError(
            'The provided sicd does not contain any radiometric information,\n\t'
            'and SigmaZeroSFPoly is required to proceed')

    # constant absolute noise polynomial - nesz in dB referenced out of the sigma-zero scale factor
    new_noise = NoiseLevelType_(
        NoisePoly=[[noise_db_value - 10 * numpy.log10(sicd.Radiometric.SigmaZeroSFPoly[0, 0]), ]],
        NoiseLevelType='ABSOLUTE')
    if sicd.Radiometric.NoiseLevel is None:
        original_noise = None
    else:
        if not override:
            raise ValueError(
                'The provided sicd already contains radiometric noise information,\n\t'
                'set override=True to replace the value')
        original_noise = sicd.Radiometric.NoiseLevel.copy()

    sicd.Radiometric.NoiseLevel = new_noise
    try:
        conversion_utility(
            input_reader, out_directory, output_files=fname,
            check_existence=check_existence,
            check_older_version=check_older_version,
            preserve_nitf_information=preserve_nitf_information)
    finally:
        # restore the original noise information even when conversion fails,
        # so the in-memory reader metadata is never left modified
        sicd.Radiometric.NoiseLevel = original_noise
if __name__ == '__main__':
    # Command-line entry point for adding a nominal noise polynomial to a SICD.
    # The description previously read "Subset SICD file." - copied from the
    # chip_sicd utility - which misrepresented this tool.
    parser = argparse.ArgumentParser(description="Add a nominal noise polynomial to a SICD file.",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file', metavar='input_file',
        help='Path input sicd data file.')
    parser.add_argument(
        'output_directory', metavar='output_directory',
        help='Path to the output directory. This directory MUST exist.')
    parser.add_argument(
        '--override', action='store_true',
        help='Override any present Noise polynomial?')
    parser.add_argument(
        '-o', '--output_file', default=None, type=str)
    parser.add_argument(
        '-n', '--noise', default=-16.0, type=float,
        help='Nominal nesz noise value in decibels')
    parser.add_argument(
        '-p', '--preserve', action='store_true',
        help='Try to preserve any NITF information?\n'
             'This only applies in the event that the file being read is a NITF')
    parser.add_argument(
        '-w', '--overwrite', action='store_true',
        help='Overwrite output file, if it already exists?')
    parser.add_argument(
        '--older', action='store_true',
        help='Try to use a less recent version of SICD (1.1),\n'
             'for possible application compliance issues?')
    parser.add_argument(
        '-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
    args = parser.parse_args()
    level = 'INFO' if args.verbose else 'WARNING'
    logging.basicConfig(level=level)
    logger = logging.getLogger('sarpy')
    logger.setLevel(level)
    nominal_sicd_noise(
        args.input_file, args.output_directory, output_file=args.output_file,
        noise_db_value=args.noise,
        override=args.override,
        check_existence=not args.overwrite,
        check_older_version=args.older,
        preserve_nitf_information=args.preserve)
| 5,484 | 35.566667 | 103 | py |
sarpy | sarpy-master/sarpy/utils/convert_to_sicd.py | """
Convert from complex SAR image format to SICD format.
For a basic help on the command-line, check
>>> python -m sarpy.utils.convert_to_sicd --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Valkyrie Systems Corporation")
import argparse
import logging
import sarpy
from sarpy.io.complex.converter import conversion_utility
def convert(input_file, output_dir, preserve_nitf_information=False,
            dem_filename_pattern=None, dem_type=None, geoid_file=None):
    """
    Convert the given complex format file to SICD format.

    Parameters
    ----------
    input_file : str
        Path to the input file.
    output_dir : str
        Output directory path.
    preserve_nitf_information : bool
        Try to preserve NITF information? This only applies in the case that the
        file being read is actually a NITF file.
    dem_filename_pattern : str | None
        Optional string specifying a Digital Elevation Model (DEM) filename pattern.
        This is a format string that specifies a glob pattern that will
        uniquely specify a DEM file from the Lat/Lon of the SW corner of
        the DEM tile. See the convert_to_sicd help text for more details.
    dem_type : str | None
        Optional DEM type ('GeoTIFF', 'GeoTIFF:WGS84', 'GeoTIFF:EGM2008', etc.).
        This parameter is required when dem_filename_pattern is specified. For 'GeoTIFF'
        DEM files, the reference surface can be either WGS84 or any of the geoid models.
        The reference surface is appended to the DEM type with a ':' separator. If the
        reference surface is not specified, then EGM2008 is assumed.
    geoid_file : str | None
        Optional Geoid file which might be needed when dem_filename_pattern is specified.
    """

    # the conversion utility performs all of the work; this wrapper simply
    # forwards the DEM options alongside the basic conversion arguments
    dem_options = {
        'dem_filename_pattern': dem_filename_pattern,
        'dem_type': dem_type,
        'geoid_file': geoid_file}
    conversion_utility(
        input_file, output_dir,
        preserve_nitf_information=preserve_nitf_information,
        **dem_options)
if __name__ == '__main__':
    # Command-line entry point: convert a complex format file to SICD,
    # with optional DEM handling controlled by the dem-* arguments.
    epilog = ('Note:\n'
              'The DEM files must have the SW corner Lat/Lon encoded in their filenames.\n'
              'The --dem-path-pattern argument contains a format string that when populated will\n'
              'create as glob pattern that will specify the desired DEM file. The following\n'
              'arguments are provided to the format string.\n'
              '    lat = int(numpy.floor(lat))\n'
              '    lon = int(numpy.floor(lon))\n'
              '    abslat = int(abs(numpy.floor(lat)))\n'
              '    abslon = int(abs(numpy.floor(lon)))\n'
              '    ns = "s" if lat < 0 else "n"\n'
              '    NS = "S" if lat < 0 else "N"\n'
              '    ew = "w" if lon < 0 else "e"\n'
              '    EW = "W" if lon < 0 else "E"\n'
              '\n'
              'For example (with Linux file separators) the following specifies a GeoTIFF DEM pattern:\n'
              '    /dem_root/tdt_{ns}{abslat:02}{ew}{abslon:03}_*/DEM/TDT_{NS}{abslat:02}{EW}{abslon:03}_*_DEM.tif\n'
              '\n'
              'In theory, one could use a simple format string using wildcard characters like this:\n'
              '    /dem_root/**/*{NS}{abslat:02}{EW}{abslon:03}*DEM.tif\n'
              '\n'
              'However, this would be unwise since glob might have to scan through many files and directories\n'
              'to find the desired file. This could be quite time consuming if there are many files in dem_root.\n'
              )
    parser = argparse.ArgumentParser(description="Convert to SICD format.",
                                     formatter_class=argparse.RawTextHelpFormatter,
                                     epilog=epilog)
    parser.add_argument(
        'input_file', metavar='input_file',
        help='Path input data file, or directory for radarsat, RCM, sentinel or other systems.\n'
             '* For radarsat or RCM, this can be the product.xml file, or parent directory\n'
             '  of product.xml or metadata/product.xml.\n'
             '* For sentinel, this can be the manifest.safe file, or parent directory of\n'
             '  manifest.safe.\n')
    parser.add_argument(
        'output_directory', metavar='output_directory',
        help='Path to the output directory. This directory MUST exist.\n'
             '* Depending on the input details, multiple SICD files may be produced.\n'
             '* The name for the output file(s) will be chosen based on CoreName and\n '
             '  transmit/collect polarization.\n')
    parser.add_argument(
        '-p', '--preserve', action='store_true',
        help='Try to preserve any NITF information?\n'
             'This only applies in the event that the file being read is a NITF')
    parser.add_argument(
        '-d', '--dem-filename-pattern',
        help='Optional string specifying a Digital Elevation Model (DEM) filename pattern.\n'
             'This is a format string that specifies a glob pattern that will\n'
             'uniquely specify a DEM file from the Lat/Lon of the SW corner of\n'
             'the DEM tile. See the note below for more details.\n')
    parser.add_argument(
        '-t', '--dem-type',
        help=('Optional DEM type ("GeoTIFF", etc.).\n'
              'This parameter is required when dem-path-pattern is specified.\n'))
    parser.add_argument(
        '-g', '--geoid-file',
        help='Optional path to a geoid definition file.\n'
             'A geoid definition file is required when dem-path-pattern is specified\n'
             'and the DEM height values are relative to a geoid.\n')
    parser.add_argument(
        '-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
    args = parser.parse_args()
    level = 'INFO' if args.verbose else 'WARNING'
    logging.basicConfig(level=level)
    logger = logging.getLogger('sarpy')
    logger.setLevel(level)
    convert(args.input_file, args.output_directory, preserve_nitf_information=args.preserve,
            dem_filename_pattern=args.dem_filename_pattern, dem_type=args.dem_type, geoid_file=args.geoid_file)
| 6,083 | 48.463415 | 117 | py |
sarpy | sarpy-master/sarpy/utils/review_class.py | from __future__ import print_function
import sys
import functools
from collections import defaultdict
import pkgutil
from importlib import import_module
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
print_func = print
def traverse_module_classification(package_name, results_dict):
    """
    Walk the given package (or single module) and record each module's
    `__classification__` value (or '__NO_CLASSIFICATION__' if absent) in
    `results_dict`.

    Parameters
    ----------
    package_name : str
        Package or module name to traverse.
    results_dict : dict
        Mapping from classification string to a list of module names; mutated
        in place. Submodules that fail to import are recorded under the
        '__FAILED_IMPORT__' key instead of aborting the traversal.
    """

    def evaluate(the_module, the_name):
        class_str = getattr(the_module, '__classification__', '__NO_CLASSIFICATION__')
        results_dict[class_str].append(the_name)

    module = import_module(package_name)
    if hasattr(module, '__path__'):
        for details in pkgutil.walk_packages(module.__path__, package_name+'.'):
            _, module_name, is_pkg = details
            if is_pkg:
                # don't evaluate the presence of class string for packages
                continue
            # noinspection PyBroadException
            try:
                sub_module = import_module(module_name)
            except Exception:
                # a submodule that fails to import should not abort the whole
                # traversal; record the failure and move on
                results_dict.setdefault('__FAILED_IMPORT__', []).append(module_name)
                continue
            evaluate(sub_module, module_name)
    else:
        evaluate(module, package_name)
def check_classification(parent_package, results_dict=None):
    """
    Collect the `__classification__` values for all modules under the given package.

    Parameters
    ----------
    parent_package : str
    results_dict : None|dict
        Mapping to populate; a fresh defaultdict(list) is created when omitted.

    Returns
    -------
    dict
        Mapping from classification string to list of module names.
    """

    collected = defaultdict(list) if results_dict is None else results_dict
    traverse_module_classification(parent_package, collected)
    return collected
def log_package_classification(parent_package, dest=sys.stdout):
    """
    Write a classification report for the given package to the destination stream.

    Parameters
    ----------
    parent_package : str
    dest : TextIO
    """

    global print_func
    print_func = functools.partial(print, file=dest)

    report = check_classification(parent_package)
    # one section per classification string, sorted for stable output
    for classification in sorted(report):
        print_func(classification)
        for module_name in report[classification]:
            print_func('\t', module_name)
if __name__ == '__main__':
    # Command-line entry point: report __classification__ values for a package.
    import argparse
    parser = argparse.ArgumentParser(
        description='Utility to create a report for displaying package __classification__ values',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-p', '--package', default='sarpy',
                        help="package or module name, should be a subpackage of sarpy")
    parser.add_argument('-o', '--output', default='stdout',
                        help="'stdout', 'string', or an output file")
    args = parser.parse_args()
    if args.output == 'stdout':
        # Send output to stdout
        log_package_classification(args.package, dest=sys.stdout)
    else:
        # Send output to file (overwriting any existing contents)
        with open(args.output, 'w') as f:
            log_package_classification(args.package, dest=f)
| 2,415 | 31.648649 | 98 | py |
sarpy | sarpy-master/sarpy/utils/chip_sicd.py | """
Create a chip (subimage) of a SICD image.
For a basic help on the command-line, check
>>> python -m sarpy.utils.chip_sicd --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "John Gorman"
import os
import argparse
from typing import Tuple
import logging
import sarpy
from sarpy.io.complex.converter import conversion_utility
from sarpy.io.complex.sicd import SICDReader
def _verify_limits(limits):
"""
Helper function to verify that the row/column limits are sensible.
Parameters
----------
limits : None|tuple|list
Returns
-------
None|tuple
"""
if limits is None:
return limits
temp_limits = [int(entry) for entry in limits]
if len(temp_limits) != 2:
raise ValueError('Got unexpected limits `{}`'.format(limits))
if not (0 <= temp_limits[0] < temp_limits[1]):
raise ValueError('Got unexpected limits `{}`'.format(limits))
return temp_limits[0], temp_limits[1]
def create_chip(input_reader, out_directory, output_file=None, row_limits=None, col_limits=None,
                check_existence=True, check_older_version=False, preserve_nitf_information=False):
    """
    Create a chip of the given sicd file. At least one of `row_limits` and
    `col_limits` must be provided.

    Parameters
    ----------
    input_reader : str|SICDReader
    out_directory : str
        The directory of the output.
    output_file : None|str
        If `None`, then the name will mirror the original with row/col details appended.
    row_limits : None|Tuple[int, int]
        The limits for the rows, relative to this actual image, to be included.
    col_limits : None|Tuple[int, int]
        The limits for the columns, relative to this actual image, to be included.
    check_existence : bool
        Check for the existence of the file before overwriting?
    check_older_version : bool
        Try to use a less recent version of SICD (1.1), for possible application compliance issues?
    preserve_nitf_information : bool
        Try to preserve some of the original NITF information?
    """

    def limits_suffix(bounds, offset):
        # render the absolute pixel bounds, or 'all' when no subset applies
        if bounds is None:
            return 'all'
        return '{0:d}-{1:d}'.format(offset + bounds[0], offset + bounds[1])

    if isinstance(input_reader, str):
        input_reader = SICDReader(input_reader)
    if not isinstance(input_reader, SICDReader):
        raise TypeError('We require that the input is a SICD reader or path to a sicd file.')

    row_limits = _verify_limits(row_limits)
    col_limits = _verify_limits(col_limits)
    if row_limits is None and col_limits is None:
        raise ValueError('At least one of row_limits and col_limits must be provided.')

    if output_file is None:
        # mirror the original name, appending the absolute row/col extents
        base_name = os.path.split(input_reader.file_name)[1]
        stem, extension = os.path.splitext(base_name)
        stem += '_{}_{}'.format(
            limits_suffix(row_limits, input_reader.sicd_meta.ImageData.FirstRow),
            limits_suffix(col_limits, input_reader.sicd_meta.ImageData.FirstCol))
        out_name = stem + extension
    else:
        out_name = os.path.split(output_file)[1]

    conversion_utility(
        input_reader, out_directory, output_files=out_name,
        row_limits=row_limits, column_limits=col_limits,
        check_existence=check_existence,
        check_older_version=check_older_version,
        preserve_nitf_information=preserve_nitf_information)
if __name__ == '__main__':
    # Command-line entry point: extract a row/column subset of a SICD file.
    parser = argparse.ArgumentParser(description="Subset SICD file.",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file', metavar='input_file',
        help='Path input sicd data file.')
    parser.add_argument(
        'output_directory', metavar='output_directory',
        help='Path to the output directory. This directory MUST exist.')
    parser.add_argument(
        '-o', '--output_file', default=None, type=str)
    parser.add_argument(
        '-r', '--row_lims', default=None, nargs=2, type=int,
        help='Row limits for chip, integers: row_start row_stop')
    parser.add_argument(
        '-c', '--col_lims', default=None, nargs=2, type=int,
        help='Column limits for chip, integers: col_start col_stop')
    parser.add_argument(
        '-p', '--preserve', action='store_true',
        help='Try to preserve any NITF information?\n'
             'This only applies in the event that the file being read is a NITF')
    parser.add_argument(
        '-w', '--overwrite', action='store_true',
        help='Overwrite output file, if it already exists?')
    parser.add_argument(
        '--older', action='store_true',
        help='Try to use a less recent version of SICD (1.1),\n'
             'for possible application compliance issues?')
    parser.add_argument(
        '-v', '--verbose', action='store_true', help='Verbose (level="INFO") logging?')
    args = parser.parse_args()
    level = 'INFO' if args.verbose else 'WARNING'
    logging.basicConfig(level=level)
    logger = logging.getLogger('sarpy')
    logger.setLevel(level)
    # create_chip validates that at least one of the row/col limits is provided
    create_chip(
        args.input_file, args.output_directory, output_file=args.output_file,
        row_limits=args.row_lims, col_limits=args.col_lims,
        check_existence=not args.overwrite,
        check_older_version=args.older,
        preserve_nitf_information=args.preserve)
| 5,388 | 34.688742 | 99 | py |
sarpy | sarpy-master/sarpy/utils/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/utils/cphd_utils.py | """
Extract information from the CPHD header for review.
From the command-line
>>> python -m sarpy.utils.cphd_utils <path to cphd file>
For a basic help on the command-line, check
>>> python -m sarpy.utils.cphd_utils --help
"""
from __future__ import print_function
import argparse
import sys
import functools
from xml.dom import minidom
from typing import Union, TextIO, BinaryIO
import os
from io import StringIO
from sarpy.io.phase_history.cphd import CPHDDetails
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
# Custom print function
print_func = print
def _define_print_function(destination):
    """
    Rebind the module-level ``print_func`` so that subsequent output is
    written to the given text stream.

    Parameters
    ----------
    destination : TextIO
    """

    global print_func
    print_func = functools.partial(print, file=destination)
def _print_header(input_file):
    # type: (Union[str, BinaryIO]) -> None
    """
    Print the CPHD text header (each line up to the first blank line) via the
    module-level print_func. A caller-supplied file object is restored to its
    original position on exit; a path opened here is closed.
    """
    def _finalize():
        # restore the caller's file state - close what we opened, and
        # reposition a caller-supplied file object where it started
        if close_after:
            file_object.close()
        if initial_location is not None:
            file_object.seek(initial_location)
    if isinstance(input_file, str):
        file_object = open(input_file, 'rb')
        initial_location = None
        close_after = True
    elif hasattr(input_file, 'readline'):
        # caller-supplied file-like object - remember its position and rewind
        file_object = input_file
        initial_location = file_object.tell()
        file_object.seek(0)
        close_after = False
    else:
        raise TypeError(
            'Input requires a file path or a binary mode file-like object')
    while True:
        lin = file_object.readline().strip()
        if not isinstance(lin, bytes):
            # a text-mode object yields str; the header must be read as bytes
            _finalize()
            raise ValueError('Requires an input opened in binary mode')
        if lin:
            print_func(lin.decode())
        else:
            # the header ends at the first blank line (or EOF)
            break
    _finalize()
def _create_default_output_file(input_file):
# type: (Union[str, BinaryIO]) -> str
if isinstance(input_file, str):
return os.path.splitext(input_file)[0] + '.meta_dump.txt'
else:
return os.path.expanduser('~/Desktop/phase_history.meta_dump.txt')
def _print_structure(input_file):
    # type: (Union[str, BinaryIO]) -> None
    """
    Pretty-print the CPHD XML structure to the module-level print_func.
    """

    details = CPHDDetails(input_file)
    raw = details.get_cphd_bytes()
    pretty = minidom.parseString(raw.decode()).toprettyxml(indent='    ', newl='\n')
    # NB: this may or may not exhibit platform dependent choices in which codec (i.e. latin-1 versus utf-8)
    for index, line in enumerate(pretty.splitlines()):
        if index == 0:
            # drop the declaration minidom inserts, if the source had none
            if line.startswith('<?xml version') and not raw.startswith(b'<?xml version'):
                continue
            print_func(line)
        elif line.strip() != '':
            # skip blank lines that arise when the XML was already formatted
            print_func(line)
def print_cphd_metadata(input_file, destination=sys.stdout):
    """
    Prints the full CPHD metadata (both header and CPHD structure) to the
    given destination.

    Parameters
    ----------
    input_file : str|BinaryIO
    destination : TextIO
    """
    # repoint the module-level print_func at the requested destination
    _define_print_function(destination)
    if isinstance(input_file, str):
        print_func('Details for CPHD file {}'.format(input_file))
    print_func('---- CPHD Header Information ----')
    _print_header(input_file)
    print_func('')
    print_func('')
    print_func('---- CPHD Structure ----')
    _print_structure(input_file)
    print_func('')
def print_cphd_header(input_file, destination=sys.stdout):
    """
    Prints the full CPHD header to the given destination.

    Parameters
    ----------
    input_file : str|BinaryIO
    destination : TextIO
    """
    # repoint the module-level print_func at the requested destination
    _define_print_function(destination)
    _print_header(input_file)
def print_cphd_xml(input_file, destination=sys.stdout):
    """
    Prints the full CPHD XML structure to the given destination.
    (The previous docstring incorrectly said "header".)

    Parameters
    ----------
    input_file : str|BinaryIO
    destination : TextIO
    """
    # repoint the module-level print_func at the requested destination
    _define_print_function(destination)
    _print_structure(input_file)
def _dump_pattern(input_file, destination, call_method):
# type: (Union[str, BinaryIO], str, Callable) -> Union[None, str]
if destination == 'stdout':
call_method(input_file, destination=sys.stdout)
elif destination == 'string':
out = StringIO()
call_method(input_file, destination=out)
value = out.getvalue()
out.close() # free the buffer
return value
else:
the_out_file = _create_default_output_file(input_file) if destination == 'default' else destination
with open(the_out_file, 'w') as fi:
call_method(input_file, destination=fi)
def dump_cphd_metadata(input_file, destination):
    """
    Dump the CPHD metadata (both header and CPHD structure) to the given
    destination.

    Parameters
    ----------
    input_file : str|BinaryIO
        Path to or binary file-like object containing a CPHD file.
    destination : str
        'stdout', 'string', 'default' (will use `file_name+'.meta_dump.txt'`),
        or the path to an output file.

    Returns
    -------
    None|str
        There is only a return value if `destination=='string'`.
    """

    # propagate the result so that destination=='string' actually yields the
    # text, as the docstring promises (previously the value was discarded)
    return _dump_pattern(input_file, destination, print_cphd_metadata)
def dump_cphd_header(input_file, destination):
    """
    Dump the CPHD header to the given destination.

    Parameters
    ----------
    input_file : str|BinaryIO
        Path to or binary file-like object containing a CPHD file.
    destination : str
        'stdout', 'string', 'default' (will use `file_name+'.meta_dump.txt'`),
        or the path to an output file.

    Returns
    -------
    None|str
        There is only a return value if `destination=='string'`.
    """

    # propagate the result so that destination=='string' actually yields the
    # text, as the docstring promises (previously the value was discarded)
    return _dump_pattern(input_file, destination, print_cphd_header)
def dump_cphd_xml(input_file, destination):
    """
    Dump the CPHD structure to the given destination.

    Parameters
    ----------
    input_file : str|BinaryIO
        Path to or binary file-like object containing a CPHD file.
    destination : str
        'stdout', 'string', 'default' (will use `file_name+'.meta_dump.txt'`),
        or the path to an output file.

    Returns
    -------
    None|str
        There is only a return value if `destination=='string'`.
    """

    # propagate the result so that destination=='string' actually yields the
    # text, as the docstring promises (previously the value was discarded)
    return _dump_pattern(input_file, destination, print_cphd_xml)
if __name__ == '__main__':
    # Command-line entry point: dump CPHD header and/or XML structure.
    parser = argparse.ArgumentParser(
        description="Create extract metadata information from a CPHD file.",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'input_file', metavar='input_file', help='Path input CPHD file.')
    parser.add_argument(
        '-o', '--output', default='default',
        help="'default', 'stdout', or the path for an output file.\n"
             "* If not provided (`default`), the output will be at '<input path>.txt' \n"
             "* 'stdout' will print the information to standard out.\n"
             "* Otherwise, this is expected to be the path to a file \n"
             "  and output will be written there.\n"
             "  NOTE: existing output files will be overwritten.")
    parser.add_argument(
        '-d', '--data', default='both', choices=['both', 'header', 'xml'],
        help='Which information should be printed?')
    args = parser.parse_args()
    # dispatch on the requested data selection
    if args.data == 'both':
        dump_cphd_metadata(args.input_file, args.output)
    elif args.data == 'header':
        dump_cphd_header(args.input_file, args.output)
    elif args.data == 'xml':
        dump_cphd_xml(args.input_file, args.output)
    else:
        raise ValueError('Got unhandled data option {}'.format(args.data))
| 7,698 | 27.835206 | 107 | py |
sarpy | sarpy-master/sarpy/processing/rational_polynomial.py | """
General purpose rational polynomial tools
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import List, Tuple, Sequence, Optional, Union
import numpy
from numpy.polynomial import polynomial
from scipy.linalg import lstsq, LinAlgError
from sarpy.compliance import SarpyError
logger = logging.getLogger(__name__)
class SarpyRatPolyError(SarpyError):
    """Custom exception raised when rational polynomial fitting fails."""
#################
# helper functions
def _get_num_variables(coeff_list: Sequence[Union[int, Tuple[int, ...]]]) -> int:
"""
Determine the number of variables by inspection of the coefficient list
Parameters
----------
coeff_list : Sequence
Returns
-------
int
"""
variables = None
for entry in coeff_list:
if isinstance(entry, int):
if variables is None:
variables = 1
elif variables != 1:
raise ValueError('Entry order mismatch')
else:
if variables is None:
variables = len(entry)
elif variables != len(entry):
raise ValueError('Entry order mismatch')
if variables is None:
raise ValueError('Unable to determine the number of variables')
return variables
def _map_list_to_poly_matrix(coeffs: Sequence[float], coeff_list: Sequence[Tuple[int, ...]]) -> numpy.ndarray:
    """
    Maps the coefficients and coefficient listing to the corresponding
    numpy polynomial coefficient matrix.

    Parameters
    ----------
    coeffs : Sequence[float]
    coeff_list : Sequence[Tuple[int, ...]]

    Returns
    -------
    coefficient_array : numpy.ndarray
    """

    num_vars = _get_num_variables(coeff_list)
    # each dimension is sized by the largest exponent appearing along it
    dims = tuple(
        max(term[i] for term in coeff_list) + 1 for i in range(num_vars))
    out = numpy.zeros(dims, dtype='float64')
    for position, term in enumerate(coeff_list):
        out[term] = coeffs[position]
    return out
def get_default_coefficient_ordering(variables: int, order: int) -> Sequence[Tuple[int, ...]]:
    """
    Gets a sensible coefficient ordering of a polynomial of given number of
    variables and order.

    Parameters
    ----------
    variables : int
    order : int

    Returns
    -------
    coefficient_list : Tuple[Tuple[int, ...]]
        List of the form `[(exponent 0, exponent 1, ...)]`, determining the ordering
        of monomial terms in the associated multivariable polynomial.
    """

    variables = int(variables)
    order = int(order)
    if variables < 1:
        raise ValueError('variables must be at least 1')
    if order < 1:
        raise ValueError('order must be at least 1')

    # enumerate the exponent grid, keeping only terms of total degree <= order
    grid = (order + 1, ) * variables
    return tuple(
        index for index in numpy.ndindex(grid) if sum(index) <= order)
###################
# base rational polynomial fitting functions
def rational_poly_fit_1d(
        x: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Sequence[Union[int, Tuple[int]]],
        cond: Optional[float] = None) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Fit a one-variable rational polynomial with monomials ordered per `coeff_list`.

    Parameters
    ----------
    x : numpy.ndarray
    data : numpy.ndarray
    coeff_list : List[int|Tuple[int]]
        The monomial ordering; the first entry must be the constant term.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    numerator: numpy.ndarray
    denominator: numpy.ndarray

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if coeff_list[0] not in [0, (0, )]:
        raise ValueError(
            'The first entry of coeff_list is required to be the constant term `0`')
    if x.size != data.size:
        raise ValueError('Size mismatch among data entries')
    samples = x.flatten()
    observed = data.flatten()
    term_count = len(coeff_list)
    # Pinning the denominator constant term at 1 makes the fit linear:
    #   P(x)/(1 + Q(x)) = d  <=>  P(x) - d*Q(x) = d
    design = numpy.empty((samples.size, 2*term_count - 1), dtype=numpy.float64)
    for col, entry in enumerate(coeff_list):
        well_formed = isinstance(entry, int) or (
            isinstance(entry, tuple) and len(entry) == 1 and isinstance(entry[0], int))
        if not well_formed:
            raise TypeError('coeff_list must be a list of integers or length 1 tuples of ints')
        power = entry[0] if isinstance(entry, tuple) else entry
        monomial = numpy.power(samples, power) if power > 0 else 1
        design[:, col] = monomial
        if col > 0:
            design[:, col + term_count - 1] = -monomial*observed
    # perform the least squares fit
    try:
        solution, residuals, rank, sing_values = lstsq(design, observed, cond=cond)
    except LinAlgError as e:
        raise SarpyRatPolyError(str(e))
    residuals = residuals/float(samples.size)
    logger.info(
        'Performed rational polynomial fit, got\n\t'
        'residuals {}\n\t'
        'rank {}\n\t'
        'singular values {}'.format(residuals, rank, sing_values))
    numerator = numpy.zeros((term_count, ), dtype='float64')
    denominator = numpy.zeros((term_count, ), dtype='float64')
    denominator[0] = 1.0
    numerator[:] = solution[:term_count]
    denominator[1:] = solution[term_count:]
    return numerator, denominator
def rational_poly_fit_2d(
        x: numpy.ndarray,
        y: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Sequence[Tuple[int, int]],
        cond: Optional[float] = None) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Fit a two-variable rational polynomial with monomials ordered per `coeff_list`.

    Parameters
    ----------
    x : numpy.ndarray
    y : numpy.ndarray
    data : numpy.ndarray
    coeff_list : Sequence[Tuple[int, int]]
        The monomial ordering; the first entry must be the constant term.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    numerator: numpy.ndarray
    denominator: numpy.ndarray

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if coeff_list[0] != (0, 0):
        raise ValueError(
            'The first entry of coeff_list is required to be the constant term `(0, 0)`')
    if not (x.size == y.size and x.size == data.size):
        raise ValueError('Size mismatch among data entries')
    x_samp = x.flatten()
    y_samp = y.flatten()
    observed = data.flatten()
    term_count = len(coeff_list)
    # Pinning the denominator constant term at 1 makes the fit linear:
    #   P(x, y)/(1 + Q(x, y)) = d  <=>  P(x, y) - d*Q(x, y) = d
    design = numpy.empty((x_samp.size, 2*term_count - 1), dtype=numpy.float64)
    for col, exponents in enumerate(coeff_list):
        if len(exponents) != 2:
            raise TypeError('coeff_list must be a list of tuples of length 2')
        monomial = 1
        if exponents[0] > 0:
            monomial = monomial*numpy.power(x_samp, exponents[0])
        if exponents[1] > 0:
            monomial = monomial*numpy.power(y_samp, exponents[1])
        design[:, col] = monomial
        if col > 0:
            design[:, col + term_count - 1] = -monomial*observed
    # perform the least squares fit
    try:
        solution, residuals, rank, sing_values = lstsq(design, observed, cond=cond)
    except LinAlgError as e:
        raise SarpyRatPolyError(str(e))
    residuals = residuals/float(x_samp.size)
    logger.info(
        'Performed rational polynomial fit, got\n\t'
        'residuals {}\n\t'
        'rank {}\n\t'
        'singular values {}'.format(residuals, rank, sing_values))
    numerator = numpy.zeros((term_count,), dtype='float64')
    denominator = numpy.zeros((term_count,), dtype='float64')
    denominator[0] = 1.0
    numerator[:] = solution[:term_count]
    denominator[1:] = solution[term_count:]
    return numerator, denominator
def rational_poly_fit_3d(
        x: numpy.ndarray,
        y: numpy.ndarray,
        z: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Sequence[Tuple[int, int, int]],
        cond: Optional[float] = None) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Fit a three-variable rational polynomial with monomials ordered per `coeff_list`.

    Parameters
    ----------
    x : numpy.ndarray
    y : numpy.ndarray
    z : numpy.ndarray
    data : numpy.ndarray
    coeff_list : Sequence[Tuple[int, int, int]]
        The monomial ordering; the first entry must be the constant term.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    numerator: numpy.ndarray
    denominator: numpy.ndarray

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if coeff_list[0] != (0, 0, 0):
        raise ValueError(
            'The first entry of coeff_list is required to be the constant term `(0, 0, 0)`')
    if not (x.size == y.size and x.size == z.size and x.size == data.size):
        raise ValueError('Size mismatch among data entries')
    x_samp = x.flatten()
    y_samp = y.flatten()
    z_samp = z.flatten()
    observed = data.flatten()
    term_count = len(coeff_list)
    # Pinning the denominator constant term at 1 makes the fit linear:
    #   P(x, y, z)/(1 + Q(x, y, z)) = d  <=>  P(x, y, z) - d*Q(x, y, z) = d
    design = numpy.empty((x_samp.size, 2*term_count - 1), dtype=numpy.float64)
    for col, exponents in enumerate(coeff_list):
        if len(exponents) != 3:
            raise TypeError('coeff_list must be a list of tuples of length 3')
        monomial = 1
        if exponents[0] > 0:
            monomial = monomial*numpy.power(x_samp, exponents[0])
        if exponents[1] > 0:
            monomial = monomial*numpy.power(y_samp, exponents[1])
        if exponents[2] > 0:
            monomial = monomial*numpy.power(z_samp, exponents[2])
        design[:, col] = monomial
        if col > 0:
            design[:, col + term_count - 1] = -monomial*observed
    # perform the least squares fit
    try:
        solution, residuals, rank, sing_values = lstsq(design, observed, cond=cond)
    except LinAlgError as e:
        raise SarpyRatPolyError(str(e))
    residuals = residuals/float(x_samp.size)
    logger.info(
        'Performed rational polynomial fit, got\n\t'
        'residuals {}\n\t'
        'rank {}\n\t'
        'singular values {}'.format(residuals, rank, sing_values))
    numerator = numpy.zeros((term_count,), dtype='float64')
    denominator = numpy.zeros((term_count,), dtype='float64')
    denominator[0] = 1.0
    numerator[:] = solution[:term_count]
    denominator[1:] = solution[term_count:]
    return numerator, denominator
####################
# rational polynomial definition
class RationalPolynomial(object):
    r"""
    A basic rational polynomial implementation. This assumes the data model
    `input_data -> output_data` via the relation

    .. math::
        X = (x, y, ...) & = (input\_data - input\_offset)/input\_scale \\
        (output\_data - output\_offset)/output\_scale & = Data = numerator(X)/denominator(X) \\
        output\_data & = (numerator(X)/denominator(X))*output_scale + output\_offset

    This object is callable, and acts as the evaluation function after construction.
    That is, suppose we have

    .. code::
        rational_poly = RationalPolynomial(...) # suppose constructed as 2 variables
        output_0 = rational_poly([x, y]) # pass in the two variables as a single array
        output_1 = rational_poly(x, y) # pass in the two variables individually
        # output_0 and output_1 should be identical
        output_fail = rational_poly(x, y, z)
        # this raises an exception for mismatch with the number of variables
    """
    __slots__ = (
        '_numerator', '_denominator', '_coeff_list', '_variables', '_input_offsets', '_input_scales',
        '_output_offset', '_output_scale', '_numerator_array', '_denominator_array')
    def __init__(
            self,
            numerator: Union[Sequence[float], numpy.ndarray],
            denominator: Union[Sequence[float], numpy.ndarray],
            coeff_list: Sequence[Tuple[int, ...]],
            input_offsets: Sequence[float],
            input_scales: Sequence[float],
            output_offset: float,
            output_scale: float):
        """
        Parameters
        ----------
        numerator : Sequence|numpy.ndarray
            Numerator coefficients, parallel to `coeff_list`.
        denominator : Sequence|numpy.ndarray
            Denominator coefficients, parallel to `coeff_list`.
        coeff_list : Sequence[Tuple[int, ...]]
            The monomial exponent ordering; also fixes the number of variables.
        input_offsets : Sequence[float]
            Per-variable input normalization offsets, one per variable.
        input_scales : Sequence[float]
            Per-variable input normalization scales, one per variable.
        output_offset : float
        output_scale : float
        """
        # the coefficient list determines the number of variables and term count
        self._coeff_list = coeff_list
        self._variables = _get_num_variables(coeff_list)
        if self._variables not in [1, 2, 3]:
            raise ValueError('Functionality allows only 1, 2, or 3 variables.')
        # numerator/denominator must be parallel to the coefficient listing
        if len(numerator) != len(self._coeff_list):
            raise ValueError('numerator must be the same length as coeff_list')
        self._numerator = numerator
        if len(denominator) != len(self._coeff_list):
            raise ValueError('denominator must be the same length as coeff_list')
        self._denominator = denominator
        if len(input_offsets) != self._variables:
            raise ValueError('The input_offsets must be the same length as the number of variables')
        self._input_offsets = input_offsets
        if len(input_scales) != self._variables:
            raise ValueError('The input_scale must be the same length as the number of variables')
        self._input_scales = input_scales
        self._output_offset = float(output_offset)
        self._output_scale = float(output_scale)
        # pre-build the dense coefficient matrices once, for use with
        # numpy.polynomial.polynomial.polyval/polyval2d/polyval3d in __call__
        self._numerator_array = _map_list_to_poly_matrix(numerator, coeff_list)
        self._denominator_array = _map_list_to_poly_matrix(denominator, coeff_list)
    @property
    def variables(self) -> int:
        """
        The number of independent variables.

        Returns
        -------
        int
        """
        return self._variables
    @property
    def coefficient_list(self) -> Sequence[Tuple[int, ...]]:
        """
        The coefficient list.

        Returns
        -------
        Sequence
        """
        return self._coeff_list
    @property
    def numerator(self) -> Sequence[float]:
        """
        The numerator coefficients.

        Returns
        -------
        Sequence
        """
        return self._numerator
    @property
    def denominator(self) -> Sequence[float]:
        """
        The denominator coefficients.

        Returns
        -------
        Sequence
        """
        return self._denominator
    def __call__(self, *input_variables: List[numpy.ndarray]) -> numpy.ndarray:
        # Evaluate the rational polynomial. Inputs may be supplied either as
        # one packed array whose final dimension matches `variables`, or as
        # `variables` separate arguments.
        def ensure_the_type(data):
            if isinstance(data, (numpy.number, int, float, numpy.ndarray)):
                return data
            else:
                return numpy.array(data)
        if len(input_variables) not in [1, self.variables]:
            raise ValueError('Got an unexpected number of input arguments')
        # a single argument is treated as the packed form, except for 1 variable
        if len(input_variables) == 1:
            separate = False
            inp_vars = ensure_the_type(input_variables[0])
        else:
            separate = True
            inp_vars = [ensure_the_type(entry) for entry in input_variables]
        # todo: should we evaluate the viability of the input?
        if self.variables == 1:
            # normalize the input, then evaluate numerator/denominator
            x = (inp_vars - self._input_offsets[0])/self._input_scales[0]
            value = polynomial.polyval(x, self._numerator_array) / \
                polynomial.polyval(x, self._denominator_array)
        elif self.variables == 2:
            if separate:
                x = (inp_vars[0] - self._input_offsets[0])/self._input_scales[0]
                y = (inp_vars[1] - self._input_offsets[1])/self._input_scales[1]
            else:
                # packed form - final dimension indexes the variables
                if inp_vars.shape[-1] != 2:
                    raise ValueError(
                        'Final dimension of input data ({}) must match the number '
                        'of variables ({}).'.format(inp_vars.shape, self.variables))
                x = (inp_vars[..., 0] - self._input_offsets[0])/self._input_scales[0]
                y = (inp_vars[..., 1] - self._input_offsets[1])/self._input_scales[1]
            value = polynomial.polyval2d(x, y, self._numerator_array) / \
                polynomial.polyval2d(x, y, self._denominator_array)
        elif self.variables == 3:
            if separate:
                x = (inp_vars[0] - self._input_offsets[0])/self._input_scales[0]
                y = (inp_vars[1] - self._input_offsets[1])/self._input_scales[1]
                z = (inp_vars[2] - self._input_offsets[2])/self._input_scales[2]
            else:
                # packed form - final dimension indexes the variables
                if inp_vars.shape[-1] != 3:
                    raise ValueError(
                        'Final dimension of input data ({}) must match the number '
                        'of variables ({}).'.format(inp_vars.shape, self.variables))
                x = (inp_vars[..., 0] - self._input_offsets[0])/self._input_scales[0]
                y = (inp_vars[..., 1] - self._input_offsets[1])/self._input_scales[1]
                z = (inp_vars[..., 2] - self._input_offsets[2]) / self._input_scales[2]
            value = polynomial.polyval3d(x, y, z, self._numerator_array) / \
                polynomial.polyval3d(x, y, z, self._denominator_array)
        else:
            raise ValueError('More than 3 variables is unsupported')
        # denormalize the output
        return value*self._output_scale + self._output_offset
def _get_scale_and_offset(array: numpy.ndarray) -> Tuple[float, float]:
min_value = numpy.min(array)
max_value = numpy.max(array)
scale_value = 0.5*(max_value - min_value)
offset_value = 0.5*(max_value + min_value)
return offset_value, scale_value
def get_rational_poly_1d(
        x: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Optional[Sequence[Union[int, Tuple[int]]]] = None,
        order: Optional[int] = None,
        cond: Optional[float] = None) -> RationalPolynomial:
    """
    Gets the RationalPolynomial instance that comes from fitting the provided data.

    Exactly one of `coeff_list` and `order` must be provided.

    Parameters
    ----------
    x : numpy.ndarray
    data : numpy.ndarray
    coeff_list : None|Sequence
        Explicit monomial ordering; mutually exclusive with `order`.
    order : None|int
        Maximum total degree; mutually exclusive with `coeff_list`.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    RationalPolynomial

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if (coeff_list is None and order is None) or \
            (coeff_list is not None and order is not None):
        # NB: typo fixed here ('Exact one' -> 'Exactly one')
        raise ValueError('Exactly one of coeff_list and order must be provided.')
    if order is not None:
        coeff_list = get_default_coefficient_ordering(1, int(order))
    if _get_num_variables(coeff_list) != 1:
        raise ValueError('The number of variables defined by the coefficient list must be 1.')

    scale_x, offset_x = _get_scale_and_offset(x)
    scale_data, offset_data = _get_scale_and_offset(data)
    # fit against normalized input/output; the normalization parameters are
    # recorded on the returned object so evaluation matches the fit
    numerator, denominator = rational_poly_fit_1d(
        (x-offset_x)/scale_x,
        (data-offset_data)/scale_data, coeff_list, cond=cond)
    return RationalPolynomial(
        numerator, denominator, coeff_list,
        (offset_x, ), (scale_x, ),
        offset_data, scale_data)
def get_rational_poly_2d(
        x: numpy.ndarray,
        y: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Optional[Sequence[Tuple[int, int]]] = None,
        order: Optional[int] = None,
        cond: Optional[float] = None) -> RationalPolynomial:
    """
    Gets the RationalPolynomial instance that comes from fitting the provided data.

    Exactly one of `coeff_list` and `order` must be provided.

    Parameters
    ----------
    x : numpy.ndarray
    y : numpy.ndarray
    data : numpy.ndarray
    coeff_list : None|Sequence
        Explicit monomial ordering; mutually exclusive with `order`.
    order : None|int
        Maximum total degree; mutually exclusive with `coeff_list`.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    RationalPolynomial

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if (coeff_list is None and order is None) or \
            (coeff_list is not None and order is not None):
        # NB: typo fixed here ('Exact one' -> 'Exactly one')
        raise ValueError('Exactly one of coeff_list and order must be provided.')
    if order is not None:
        coeff_list = get_default_coefficient_ordering(2, int(order))
    if _get_num_variables(coeff_list) != 2:
        raise ValueError('The number of variables defined by the coefficient list must be 2.')

    scale_x, offset_x = _get_scale_and_offset(x)
    scale_y, offset_y = _get_scale_and_offset(y)
    scale_data, offset_data = _get_scale_and_offset(data)
    # fit against normalized input/output; the normalization parameters are
    # recorded on the returned object so evaluation matches the fit
    numerator, denominator = rational_poly_fit_2d(
        (x-offset_x)/scale_x, (y-offset_y)/scale_y,
        (data-offset_data)/scale_data, coeff_list, cond=cond)
    return RationalPolynomial(
        numerator, denominator, coeff_list,
        (offset_x, offset_y), (scale_x, scale_y),
        offset_data, scale_data)
def get_rational_poly_3d(
        x: numpy.ndarray,
        y: numpy.ndarray,
        z: numpy.ndarray,
        data: numpy.ndarray,
        coeff_list: Optional[Sequence[Tuple[int, int, int]]] = None,
        order: Optional[int] = None,
        cond: Optional[float] = None) -> RationalPolynomial:
    """
    Gets the RationalPolynomial instance that comes from fitting the provided data.

    Exactly one of `coeff_list` and `order` must be provided.

    Parameters
    ----------
    x : numpy.ndarray
    y : numpy.ndarray
    z : numpy.ndarray
    data : numpy.ndarray
    coeff_list : None|Sequence
        Explicit monomial ordering; mutually exclusive with `order`.
        (NB: annotation corrected to 3-tuples of int.)
    order : None|int
        Maximum total degree; mutually exclusive with `coeff_list`.
    cond : None|float
        Passed through to :func:`scipy.linalg.lstsq`.

    Returns
    -------
    RationalPolynomial

    Raises
    ------
    SarpyRatPolyError
        Convergence failures passed through
    """

    if (coeff_list is None and order is None) or \
            (coeff_list is not None and order is not None):
        # NB: typo fixed here ('Exact one' -> 'Exactly one')
        raise ValueError('Exactly one of coeff_list and order must be provided.')
    if order is not None:
        coeff_list = get_default_coefficient_ordering(3, int(order))
    if _get_num_variables(coeff_list) != 3:
        raise ValueError('The number of variables defined by the coefficient list must be 3.')

    scale_x, offset_x = _get_scale_and_offset(x)
    scale_y, offset_y = _get_scale_and_offset(y)
    scale_z, offset_z = _get_scale_and_offset(z)
    scale_data, offset_data = _get_scale_and_offset(data)
    # fit against normalized input/output; the normalization parameters are
    # recorded on the returned object so evaluation matches the fit
    numerator, denominator = rational_poly_fit_3d(
        (x-offset_x)/scale_x, (y-offset_y)/scale_y, (z-offset_z)/scale_z,
        (data-offset_data)/scale_data, coeff_list, cond=cond)
    return RationalPolynomial(
        numerator, denominator, coeff_list,
        (offset_x, offset_y, offset_z), (scale_x, scale_y, scale_z),
        offset_data, scale_data)
####################
# collective rational polynomial function
class CombinedRationalPolynomial(object):
    """
    Bundle a collection of RationalPolynomial objects, all sharing the same
    number of input variables, into a single object producing
    multi-component output.
    """

    __slots__ = ('_collection', )

    def __init__(self, *collection: List[RationalPolynomial]):
        # permit passing either a single sequence or separate arguments
        if len(collection) == 1 and isinstance(collection[0], Sequence):
            collection = collection[0]
        if len(collection) < 2:
            raise ValueError('This requires more than a single input')

        members = []
        expected_variables = None
        for element in collection:
            if not isinstance(element, RationalPolynomial):
                raise TypeError(
                    'Every input must be an instance of RationalPolynomial,\n\t'
                    'got type `{}`'.format(type(element)))
            # every member must agree on the number of input variables
            if expected_variables is None:
                expected_variables = element.variables
            elif expected_variables != element.variables:
                raise TypeError(
                    'Every input must be an instance of RationalPolynomial with\n\t'
                    'the same number of variables, got type `{}` and `{}`'.format(
                        expected_variables, element.variables))
            members.append(element)
        self._collection = tuple(members)

    def __call__(
            self,
            *args: List[numpy.ndarray],
            combine: bool = True) -> Union[Tuple[numpy.ndarray, ...], numpy.ndarray]:
        # evaluate every member on the same input
        evaluated = tuple(poly(*args) for poly in self._collection)
        # either stack into one array along a new final axis, or return the tuple
        return numpy.stack(evaluated, axis=-1) if combine else evaluated
| 24,428 | 31.270806 | 120 | py |
sarpy | sarpy-master/sarpy/processing/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/processing/registration/regi.py | """
Basic image registration, generally best suited most suited for coherent image
collection. This is based pretty directly on an approach developed at Sandia and
generally referred to by the name "regi".
The relevant matlab code appears to be authored by Terry M. Calloway,
Sandia National Laboratories, and modified by Wade Schwartzkopf, NGA.
"""
__classification__ = 'UNCLASSIFIED'
__author__ = ["Thomas McCullough", "Terry M. Calloway", "Wade Schwartzkopf"]
import logging
from typing import List, Tuple, Dict, Union, Optional
import numpy
from scipy.signal import correlate2d
from scipy.interpolate import LinearNDInterpolator
from sarpy.io.general.base import BaseReader
logger = logging.getLogger(__name__)
def _validate_match_parameters(
reference_size: Tuple[int, int],
moving_size: Tuple[int, int],
match_box_size: Tuple[int, int],
moving_deviation: Tuple[int, int],
decimation: Tuple[int, int]) -> None:
"""
Validate the match paramaters based the size of the images.
Parameters
----------
reference_size : Tuple[int, int]
moving_size : Tuple[int, int]
match_box_size : Tuple[int, int]
moving_deviation : Tuple[int, int]
decimation : Tuple[int, int]
"""
if not ((match_box_size[0] % 2) == 1 and match_box_size[0] > 1 and
(match_box_size[1] % 2) == 1 and match_box_size[1] > 1):
raise ValueError('The match box size must have both odd entries greater than 1')
if not ((moving_deviation[0] % 2) == 1 and moving_deviation[0] > 1 and
(moving_deviation[1] % 2) == 1 and moving_deviation[1] > 1):
raise ValueError('The match box size must have both odd entries greater than 1')
limit_fraction = 0.5
if match_box_size[0]*decimation[0] > limit_fraction*reference_size[0] or \
match_box_size[1]*decimation[1] > limit_fraction*reference_size[1]:
raise ValueError(
'The size of the match box - {} with decimation - {} is too large\n\t'
'with respect tothe size of the reference image - {}'.format(
match_box_size, decimation, reference_size))
if match_box_size[0]*decimation[0] > limit_fraction*moving_size[0] or \
match_box_size[1]*decimation[1] > limit_fraction*moving_size[1]:
raise ValueError(
'The size of the match box - {} with decimation - {} is too close\n\t'
'to the size of the moving image - {}'.format(
match_box_size, decimation, moving_size))
def _populate_difference_structure(
mapping_values: List[List[Dict]]) -> None:
"""
Helper function for populating derivative estimates into our structure.
Parameters
----------
mapping_values: List[List[dict]]
"""
# NB: this assumes the expected structure
def do_diff(the_diff, the_count, direction, ref_loc0, mov_loc0, ref_loc1, mov_loc1):
if mov_loc0 is None or mov_loc1 is None:
return the_diff, the_count
the_diff += float(mov_loc1[direction] - mov_loc0[direction]) / \
float(ref_loc1[direction] - ref_loc0[direction])
the_count += 1
return the_diff, the_count
def basic_estimate_diff(entry, i, j):
ref_loc = entry['reference_location']
mov_loc = entry['moving_location']
if mov_loc is None:
return
if entry.get('row_derivative', None) is None:
# calculate row derivative
r_diff = 0.0
r_count = 0
# get value based on before
if i > 0:
o_entry = mapping_values[i-1][j]
do_diff(r_diff, r_count, 0, ref_loc, mov_loc,
o_entry['reference_location'], o_entry['moving_location'])
# get value based on after
if i < len(mapping_values) - 1:
o_entry = mapping_values[i+1][j]
do_diff(r_diff, r_count, 0, ref_loc, mov_loc,
o_entry['reference_location'], o_entry['moving_location'])
if r_count > 0:
row_der = r_diff/float(r_count)
entry['row_derivative'] = row_der
if row_der < 0.0:
logger.warning('Entry ({}, {}) has negative row derivative ({})'.format(i, j, row_der))
if entry.get('column_derivative', None) is None:
# calculate the column derivative
c_diff = 0.0
c_count = 0
# get the value based on before
if j > 0:
o_entry = mapping_values[i][j-1]
do_diff(c_diff, c_count, 1, ref_loc, mov_loc,
o_entry['reference_location'], o_entry['moving_location'])
# get value based on after
if j < len(mapping_values[0]) - 1:
o_entry = mapping_values[i][j+1]
do_diff(c_diff, c_count, 1, ref_loc, mov_loc,
o_entry['reference_location'], o_entry['moving_location'])
if c_count > 0:
col_der = c_diff/float(c_count)
entry['column_derivative'] = col_der
if col_der < 0.0:
logger.warning('Entry ({}, {}) has negative column derivative ({})'.format(i, j, col_der))
for row_index, grid_row in enumerate(mapping_values):
for col_index, element in enumerate(grid_row):
basic_estimate_diff(element, row_index, col_index)
def _subpixel_shift(values: numpy.ndarray) -> float:
    """
    This is simplified port of the SAR toolbox matlab function fin_minms. This
    uses data from an empirical fit derived from unknown origins to estimate where
    the "real" minimum occurred.

    Parameters
    ----------
    values : numpy.ndarray
        Must have length 3, with either `values[1] <= min(values[0], values[2])`
        (a minimization problem), or `values[1] >= max(values[0], values[2])`
        (a maximization problem). Maximization problems will be re-cast as
        minimization through inversion.

    Returns
    -------
    shift : float
        This values will be (-1, 1), with -1 corresponding to the first location,
        0 corresponding to the center location, and 1 corresponding to the final
        location.
    """
    if not (isinstance(values, numpy.ndarray) and values.ndim == 1 and values.size == 3):
        raise ValueError('The input must be a 1-d array with three entries.')
    if not (values[1] >= max(values[0], values[2]) or values[1] <= min(values[0], values[2])):
        raise ValueError(
            'The central entry must either be larger than the other two (maximization problem)\n\t'
            'or smaller than the other two (minimization problem) - values {}'.format(values))
    if values[1] >= max(values[0], values[2]):
        # recast maximization problem as minimization by negating the samples
        return _subpixel_shift(-values)
    if values[0] == values[1] or values[1] == values[2]:
        # a flat side carries no real information
        return 0.0
    if values[0] == values[2]:
        # symmetric neighbors - the minimum is at the center
        return 0.0
    # NOTE(review): the original in-line comment claimed this shifts values up
    # to be positive, but ADDING the minimum moves values further down when the
    # minimum is negative (which is exactly the maximization-recast case);
    # `values - numpy.min(values)` would shift up. Confirm against the original
    # matlab fin_minms before changing.
    values = (values + numpy.min(values))
    # Here are the verbatim matlab comments:
    # Algorithm -
    # xm = min
    # r = (rmsmid-rmsmin)/(rmsmax-rmsmin)
    # empirical fit (r,xm) resembles arc of circle centered at xc=-11/8
    # xm = 2(xc-3/8) + sqrt(4(xc-3/8)**2 - (r-1)((r-1)-2(xc-1)))
    # empirical fit (r,xm) resembles arc of circle centered at xc=-6/8
    # xm = 1-xc - sqrt(0.125 + 2(0.25-xc)**2 - (x-xc)**2)
    nsr = 0.5
    noise = nsr*numpy.max(values)
    # NOTE(review): `values - noise` can be negative here, in which case sqrt
    # produces NaN and the returned shift is meaningless - TODO confirm the
    # intended noise-floor handling.
    rms = numpy.sqrt(values - noise)
    rmsmin = rms[1]
    rmsmid = min(rms[0], rms[2])
    rmsmax = max(rms[0], rms[2])
    # normalized contrast ratio in [0, 1] feeding the empirical circle fit
    r = (rmsmid - rmsmin)/(rmsmax - rmsmin)
    rm1 = r - 1.
    fit_val = 12.25 - rm1*(rm1 + 4.75)
    if not (2.5*2.5 < fit_val < 4.5*4.5):
        # probaly impossible...
        return 0.0
    shift = -3.5 + numpy.sqrt(fit_val)
    # the sign of the shift follows which neighbor is smaller
    return -shift if rms[0] < rms[2] else shift
def _max_correlation_step(
reference_array: numpy.ndarray,
moving_array: numpy.ndarray,
do_subpixel: bool = False) -> Tuple[Optional[numpy.ndarray], Optional[float]]:
"""
Find the best match location of the moving array inside the reference array.
Parameters
----------
reference_array : numpy.ndarray
moving_array : numpy.ndarray
do_subpixel : bool
Include a subpixel registration effort?
Returns
-------
best_location : None|numpy.ndarray
Will return `None` if there is no information, i.e. the reference patch
or moving patch is all 0. Otherwise, this will be a numpy array
`[row, column]` of the location of highest correlation, determined via
:func:`numpy.argmax`.
maximum_correlation : None|float
"""
if reference_array.ndim != 2 or moving_array != 2:
raise ValueError('Input arrays must be 2-dimensional')
if reference_array.shape[0] < moving_array.shape[0] or reference_array.shape[1] < moving_array.shape[1]:
raise ValueError(
'It is required that the moving array (shape {}) is strictly contained\n\t'
'inside the reference array (shape {})'.format(moving_array.shape, reference_array.shape))
# NB: sqrt suggested by matlab, presumably to dampen the importance of bright returns?
reference_array = numpy.sqrt(numpy.abs(reference_array))
if numpy.all(reference_array == 0):
return None, None
moving_array = numpy.sqrt(numpy.abs(moving_array))
if numpy.all(moving_array == 0):
return None, None
kernel = numpy.ones(reference_array.shape, dtype='float32')
# now, find the best match
match_values = correlate2d(reference_array, moving_array, mode='valid')
norm_values = correlate2d(kernel, moving_array*moving_array, mode='valid')
mask = (norm_values > 0)
# reduce to dot product of the unit vectors, and we pick the best match
match_values[mask] /= numpy.sqrt(norm_values[mask])
# raw maximum location
raw_max_location = numpy.unravel_index(numpy.argmax(match_values), match_values.shape)
maximum_value = match_values[raw_max_location[0], raw_max_location[1]]
if do_subpixel:
sub_shift = numpy.zeros((2,), dtype='float64')
sub_shift[0] += _subpixel_shift(match_values[raw_max_location[0]-1:raw_max_location[0]+1, raw_max_location[1]])
sub_shift[1] += _subpixel_shift(match_values[raw_max_location[0], raw_max_location[1]-1:raw_max_location[1]+1])
raw_max_location += sub_shift
if (moving_array.shape[0] % 2) == 0 or (moving_array.shape[1] % 2) == 0:
shift = numpy.zeros((2, ), dtype='float64')
else:
shift = numpy.zeros((2, ), dtype='int64')
shift[0] = 0.5*(moving_array.shape[0] - 1)
shift[1] = 0.5*(moving_array.shape[1] - 1)
return shift + raw_max_location, maximum_value
def _single_step_location(
        reference_data: Union[BaseReader, numpy.ndarray],
        reference_index: Optional[int],
        reference_size: Tuple[int, int],
        moving_data: Union[BaseReader, numpy.ndarray],
        moving_index: Optional[int],
        moving_size: Tuple[int, int],
        reference_location: Tuple[int, int],
        moving_location: Tuple[int, int],
        match_box_size: Tuple[int, int] = (25, 25),
        moving_deviation: Tuple[int, int] = (15, 15),
        decimation: Tuple[int, int] = (1, 1)) -> Tuple[Optional[Tuple[int, int]], Optional[float]]:
    """
    Perform a single step of the reference search by finding the best matching
    location at given size and scale.

    Parameters
    ----------
    reference_data : BaseReader|numpy.ndarray
    reference_index : None|int
        Image index, used only when `reference_data` is a reader.
    reference_size : Tuple[int, int]
    moving_data : BaseReader|numpy.ndarray
    moving_index : None|int
        Image index, used only when `moving_data` is a reader.
    moving_size : Tuple[int, int]
    reference_location : Tuple[int, int]
        Center (row, column) of the reference patch.
    moving_location : Tuple[int, int]
        Initial guess for the matching center in the moving image.
    match_box_size : Tuple[int, int]
        Both entries assumed odd.
    moving_deviation : Tuple[int, int]
        Both entries assumed odd; the search extent around `moving_location`.
    decimation : Tuple[int, int]

    Returns
    -------
    best_location : None|Tuple[int, int]
        Will return `None` if there is no information, i.e. the reference patch
        or moving patch is all 0.
    maximum_correlation : None|float
    """

    # NB: we require odd entries here
    match_box_half = (int((match_box_size[0] - 1)/2), int((match_box_size[1] - 1)/2))
    deviation_half = (int((moving_deviation[0] - 1)/2), int((moving_deviation[1] - 1)/2))
    # the moving patch is the match box padded by the allowed deviation
    moving_half = (deviation_half[0] + match_box_half[0], deviation_half[1] + match_box_half[1])

    # vet the reference patch location
    ref_box_start = (
        reference_location[0] - match_box_half[0]*decimation[0],
        reference_location[1] - match_box_half[1]*decimation[1])
    if ref_box_start[0] < 0 or \
            ref_box_start[1] < 0 or \
            ref_box_start[0] > reference_size[0] - match_box_size[0]*decimation[0] + 1 or \
            ref_box_start[1] > reference_size[1] - match_box_size[1]*decimation[1] + 1:
        raise ValueError(
            'Overflows bounds. Cannot proceed with reference image of size {}\n\t'
            'using reference box of size {} and decimation {}\n\t'
            'at reference location {}'.format(reference_size, match_box_size, decimation, reference_location))
    # NOTE(review): the trailing `+ 1` makes the strided slice below yield
    # match_box_size + 1 samples per axis rather than match_box_size -
    # confirm whether this is intended in the matlab regi port.
    ref_box_end = (
        ref_box_start[0] + match_box_size[0]*decimation[0] + 1,
        ref_box_start[1] + match_box_size[1]*decimation[1] + 1)
    # fetch the reference image patch
    if isinstance(reference_data, BaseReader):
        reference_array = reference_data[
            ref_box_start[0]:ref_box_end[0]:decimation[0],
            ref_box_start[1]:ref_box_end[1]:decimation[1],
            reference_index]
    else:
        reference_array = reference_data[
            ref_box_start[0]:ref_box_end[0]:decimation[0],
            ref_box_start[1]:ref_box_end[1]:decimation[1]]

    # vet the moving patch location - clamp it so the padded patch stays in bounds
    mov_loc_temp = [moving_location[0], moving_location[1]]
    for i in [0, 1]:
        if mov_loc_temp[i] < moving_half[i]*decimation[i]:
            mov_loc_temp[i] = moving_half[i]*decimation[i]
        elif mov_loc_temp[i] > moving_size[i] - moving_half[i]*decimation[i] + 1:
            mov_loc_temp[i] = moving_size[i] - moving_half[i]*decimation[i] + 1
    moving_box_start = (
        mov_loc_temp[0] - moving_half[0]*decimation[0],
        mov_loc_temp[1] - moving_half[1]*decimation[1])
    moving_box_end = (
        mov_loc_temp[0] + moving_half[0]*decimation[0] + 1,
        mov_loc_temp[1] + moving_half[1]*decimation[1] + 1)
    # fetch the moving patch array for max correlation check
    if isinstance(moving_data, BaseReader):
        moving_array = moving_data[
            moving_box_start[0]:moving_box_end[0]:decimation[0],
            moving_box_start[1]:moving_box_end[1]:decimation[1],
            moving_index]
    else:
        moving_array = moving_data[
            moving_box_start[0]:moving_box_end[0]:decimation[0],
            moving_box_start[1]:moving_box_end[1]:decimation[1]]

    # find the point of maximum correlation
    # NOTE(review): if decimation is a plain tuple (per the annotation),
    # `decimation == 1` is a scalar False and do_subpixel is never True -
    # confirm callers pass a numpy array when subpixel refinement is desired.
    do_subpixel = numpy.all(decimation == 1)
    best_temp_location, maximum_correlation = _max_correlation_step(reference_array, moving_array, do_subpixel=do_subpixel)
    if best_temp_location is None:
        return best_temp_location, maximum_correlation
    # map the patch-relative best location back to full moving image coordinates
    return ((best_temp_location[0])*decimation[0] + mov_loc_temp[0], (best_temp_location[1])*decimation[1] + mov_loc_temp[1]), maximum_correlation
def _single_step_grid(
        reference_data: Union[BaseReader, numpy.ndarray],
        reference_index: Optional[int],
        reference_size: Tuple[int, int],
        moving_data: Union[BaseReader, numpy.ndarray],
        moving_index: Optional[int],
        moving_size: Tuple[int, int],
        reference_box_rough: Tuple[int, int, int, int],
        moving_box_rough: Tuple[int, int, int, int],
        match_box_size: Tuple[int, int] = (25, 25),
        moving_deviation: Tuple[int, int] = (15, 15),
        decimation: Tuple[int, int] = (1, 1),
        previous_values: Optional[List[List[Dict]]] = None):
    """
    We will determine a series of best matching (small size) patch locations
    between the pixel area of `reference_data` laid out in `reference_box_rough`
    and the pixel area of `moving_data` laid out in `moving_box_rough` - which
    should be very close to the same size.

    Parameters
    ----------
    reference_data : BaseReader|numpy.ndarray
    reference_index : None|int
    reference_size : Tuple[int, int]
    moving_data : BaseReader|numpy.ndarray
    moving_index : None|int
    moving_size : Tuple[int, int]
    reference_box_rough : Tuple[int, int, int, int]
        Pixel area of the form `(row start, row end, column start, column end)`.
    moving_box_rough : Tuple[int, int, int, int]
        Pixel area of the form `(row start, row end, column start, column end)`.
    match_box_size : Tuple[int, int]
    moving_deviation : Tuple[int, int]
    decimation : Tuple[int, int]
    previous_values : None|List[List[dict]]
        Matching results from a previous (presumably coarser) step, used to
        seed the initial guesses at this step.

    Returns
    -------
    result_values : List[List[dict]]
        entry `[i][j]` tell the mapping of the reference location in nominal
        reference grid to moving grid location
        :code:`{'reference_location': (row, column),
        'moving_location': (matched_row, matched_column),
        'max_correlation': <value>}`
    """

    def determine_best_guess(ref_point):
        # Initial guess for where ref_point lands in the moving image.
        # NB: row_interp/col_interp are bound late - they are assigned below,
        # before this closure is ever invoked.
        if row_interp is None:
            # no usable previous results - assume a constant offset between the boxes
            return (
                ref_point[0] - reference_box_rough[0] + moving_box_rough[0],
                ref_point[1] - reference_box_rough[2] + moving_box_rough[2])
        else:
            # NOTE(review): LinearNDInterpolator below is constructed on 2-d
            # reference points, but is invoked here with a single scalar
            # coordinate - confirm this call signature behaves as intended.
            return (
                row_interp(ref_point[0]),
                col_interp(ref_point[1]))

    # sizes of the two (roughly corresponding) pixel areas
    effective_ref_size = (
        int(reference_box_rough[1] - reference_box_rough[0]),
        int(reference_box_rough[3] - reference_box_rough[2]))
    effective_move_size = (
        int(moving_box_rough[1] - moving_box_rough[0]),
        int(moving_box_rough[3] - moving_box_rough[2]))
    # validate the parameters at this scale
    _validate_match_parameters(
        effective_ref_size, effective_move_size, match_box_size, moving_deviation, decimation)
    # construct the grid which we are going to try to map
    # - grid centers are spaced so consecutive match boxes abut (2*half-size apart)
    half_row_size = int(0.5*(match_box_size[0] - 1)*decimation[0])
    half_col_size = int(0.5*(match_box_size[1] - 1)*decimation[1])
    row_grid = numpy.arange(reference_box_rough[0] + half_row_size, reference_box_rough[1] + half_row_size, 2*half_row_size)
    col_grid = numpy.arange(reference_box_rough[2] + half_col_size, reference_box_rough[3] + half_col_size, 2*half_col_size)
    # create a mapping which estimates (ref_row, ref_col) -> (mov_row, mov_col)
    row_interp = None
    col_interp = None
    if previous_values is not None:
        # determine the approximate derivative values
        _populate_difference_structure(previous_values)
        # get values from our grid, excluding places which require a negative derivative
        ref_locs = []
        mov_locs = []
        for row_values in previous_values:
            for entry in row_values:
                row_der = entry.get('row_derivative', None)
                col_der = entry.get('column_derivative', None)
                # only keep matches whose local derivatives are positive and
                # within a factor of roughly 2 of unity
                if row_der is not None and 0.25 < row_der < 2.0 and \
                        col_der is not None and 0.25 < col_der < 2.0:
                    ref_locs.append(entry['reference_location'])
                    mov_locs.append(entry['moving_location'])
        if len(ref_locs) > 3:
            # enough plausible previous matches to build an interpolated mapping
            ref_locs = numpy.array(ref_locs, dtype='float64')
            mov_locs = numpy.array(mov_locs, dtype='float64')
            # create an interpolation function mapping reference coords -> moving coords (so far)
            row_interp = LinearNDInterpolator(ref_locs, mov_locs[:, 0])
            col_interp = LinearNDInterpolator(ref_locs, mov_locs[:, 1])

    result_values = []
    # generate our grid of locations to compare, and then compare
    for row_grid_entry in row_grid:
        the_row_list = []
        result_values.append(the_row_list)
        for col_grid_entry in col_grid:
            ref_loc = (row_grid_entry, col_grid_entry)
            best_loc, max_correlation = _single_step_location(
                reference_data, reference_index, reference_size,
                moving_data, moving_index, moving_size,
                ref_loc, determine_best_guess(ref_loc),
                match_box_size=match_box_size, moving_deviation=moving_deviation, decimation=decimation)
            # best_loc may be None when no acceptable match was found
            the_row_list.append(
                {
                    'reference_location': ref_loc,
                    'moving_location': best_loc,
                    'max_correlation': max_correlation
                })
    return result_values
def register_arrays(
        reference_data: numpy.ndarray,
        moving_data: numpy.ndarray) -> List[List[Dict]]:
    """
    Register the moving_data array to the reference_data array using the regi
    algorithm.

    Parameters
    ----------
    reference_data : numpy.ndarray
        Two-dimensional reference image.
    moving_data : numpy.ndarray
        Image to register, which must share the shape of `reference_data`.

    Returns
    -------
    result_values : List[List[dict]]
        entry `[i][j]` tell the mapping of the reference location in nominal
        reference grid to moving grid location
        :code:`{'reference_location': (row, column),
        'moving_location': (matched_row, matched_column),
        'max_correlation': <value>}`
    """

    if not isinstance(reference_data, numpy.ndarray):
        raise TypeError('reference_data must be a numpy array')
    if not isinstance(moving_data, numpy.ndarray):
        raise TypeError('moving_data must be a numpy array')
    if reference_data.ndim != 2:
        raise ValueError('data arrays must be two-dimensional')
    if reference_data.shape != moving_data.shape:
        raise ValueError('data arrays must have the same (2-d) shape')

    shape = reference_data.shape
    full_box = (0, shape[0], 0, shape[1])
    smallest_dimension = min(shape)
    # choose decimation based on the smallest image dimension
    for threshold, factor in ((2000, 8), (1000, 4), (500, 2)):
        if smallest_dimension > threshold:
            decimation = (factor, factor)
            break
    else:
        decimation = (1, 1)
    # match box and allowed deviation, clipped to the image extent
    match_box = (min(25, shape[0]), min(25, shape[1]))
    deviation = (min(15, shape[0]), min(15, shape[1]))
    return _single_step_grid(
        reference_data, None, shape,
        moving_data, None, shape,
        full_box, full_box,
        match_box_size=match_box, moving_deviation=deviation, decimation=decimation)
| 22,629 | 39.996377 | 146 | py |
sarpy | sarpy-master/sarpy/processing/registration/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/processing/registration/basic.py | """
Basic image registration tools
"""
__classification__ = "UNCLASSIFIED"
__author__ = 'Thomas McCullough'
import logging
from typing import Sequence, Union, Tuple, Any
import numpy
from scipy.optimize import minimize
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.geometry.geocoords import ecf_to_geodetic
from sarpy.geometry.point_projection import ground_to_image
logger = logging.getLogger(__name__)
def best_physical_location_fit(
        structs: Sequence[Union[SICDType, SIDDType1, SIDDType2]],
        locs: Union[numpy.ndarray, list, tuple],
        **minimization_args) -> Tuple[numpy.ndarray, float, Any]:
    """
    Given a collection of SICD and/or SIDDs and a collection of image coordinates,
    each of which identifies the pixel location of the same feature in the
    respective image, determine the (best fit) geophysical location of this feature.

    This assumes that any adjustable parameters used for the SICD/SIDD projection
    model have already been applied (via :func:`define_coa_projection`).

    Parameters
    ----------
    structs : Sequence[SICDType|SIDDType1|SIDDType2]
        The collection of sicds/sidds, of length `N`
    locs : numpy.ndarray|list|tuple
        The image coordinate collection, of shape `(N, 2)`
    minimization_args
        The keyword arguments (after `args` argument) passed through to
        :func:`scipy.optimize.minimize`. This will default to `'Powell'`
        optimization, which seems generally much more reliable for this
        problem than the steepest descent based approaches.

    Returns
    -------
    ecf_location : numpy.ndarray
        The location in question, in ECF coordinates
    residue : float
        The mean square residue of the physical distance between the given
        location and the image locations projected into the surface of
        given HAE value
    result
        The minimization result object
    """

    def get_mean_location(hae_value, log_residue=False):
        # project every image location to the surface at the given HAE,
        # then average the resulting ECF points and measure the spread
        ecf_locs = numpy.zeros((points, 3), dtype='float64')
        for i, (loc, struct) in enumerate(zip(locs, structs)):
            ecf_locs[i, :] = struct.project_image_to_ground(loc, projection_type='HAE', hae0=hae_value)
        ecf_mean = numpy.mean(ecf_locs, axis=0)
        diff = ecf_locs - ecf_mean
        residue = numpy.sum(diff*diff, axis=1)
        if log_residue:
            logger.info(
                'best physical location residues [m^2]\n{}'.format(residue))
        avg_residue = numpy.mean(residue)
        return ecf_mean, avg_residue

    def average_residue(hae_value):
        return get_mean_location(hae_value)[1]

    # ensure a uniform array structure - the docstring permits list/tuple input,
    # and the shape inspection below requires an ndarray
    locs = numpy.asarray(locs, dtype='float64')

    points = len(structs)
    if points < 2:
        raise ValueError(
            'At least 2 structs must be present to determine the best location')
    if points != locs.shape[0]:
        raise ValueError(
            'The number of structs must match the number of locations')

    # determine an initial height estimate from the first structure
    struct0 = structs[0]
    if isinstance(struct0, SICDType):
        h0 = struct0.GeoData.SCP.LLH.HAE
    elif isinstance(struct0, (SIDDType1, SIDDType2)):
        ref_ecf = struct0.Measurement.ReferencePoint.ECEF.get_array()
        h0 = ecf_to_geodetic(ref_ecf)[2]
    else:
        raise TypeError('Got unexpected structure type {}'.format(type(struct0)))

    if 'method' not in minimization_args:
        minimization_args['method'] = 'Powell'
    result = minimize(average_residue, h0, **minimization_args)
    if not result.success:
        raise ValueError('Optimization failed {}'.format(result))

    values = get_mean_location(result.x, log_residue=True)
    return values[0], values[1], result
def _find_best_adjustable_parameters_sicd(
        sicd: SICDType,
        ecf_coords: numpy.ndarray,
        img_coords: numpy.ndarray,
        **minimization_args) -> Tuple[numpy.ndarray, numpy.ndarray, float, float, Any]:
    """
    Determine the projection model adjustable parameters (`'ECF'` frame) which
    best fit the geophysical coordinates to the observed image coordinates.
    The ARP offset is constrained to the plane spanned by the image row/column
    unit vectors (2 parameters), alongside a 3-d VARP offset and a range bias.

    Parameters
    ----------
    sicd : SICDType
    ecf_coords : numpy.ndarray
        The geophysical coordinates of shape `(N, 3)` for the identified features
    img_coords : numpy.ndarray
        The image coordinates of shape `(N, 2)` for the identified features
    minimization_args
        Keyword arguments passed through to :func:`scipy.optimize.minimize`,
        defaulting to `'Powell'` optimization.

    Returns
    -------
    delta_arp : numpy.ndarray
    delta_varp : numpy.ndarray
    delta_range : float
    residue : float
        The mean squared pixel residue at the optimum
    result
        The minimization result
    """

    uvect_row = sicd.Grid.Row.UVectECF.get_array()
    uvect_col = sicd.Grid.Col.UVectECF.get_array()

    def unpack(vec):
        # arp offset lives in the image plane spanned by the row/col unit vectors
        return vec[0]*uvect_row + vec[1]*uvect_col, vec[2:5], vec[5]

    def squared_residues(vec):
        arp_off, varp_off, range_off = unpack(vec)
        projected, _, _ = ground_to_image(
            ecf_coords, sicd, max_iterations=100, use_structure_coa=False,
            delta_arp=arp_off, delta_varp=varp_off, range_bias=range_off,
            adj_params_frame='ECF')
        delta = (projected - img_coords)
        return numpy.sum(delta*delta, axis=1)

    def objective(vec):
        return numpy.mean(squared_residues(vec))

    minimization_args.setdefault('method', 'Powell')
    initial_guess = numpy.zeros((6, ), dtype='float64')
    result = minimize(objective, initial_guess, **minimization_args)
    if not result.success:
        raise ValueError('Optimization failed {}'.format(result))
    logger.info(
        'best adjustable parameters residues [pix^2]\n{}'.format(
            squared_residues(result.x)))
    delta_arp, delta_varp, delta_range = unpack(result.x)
    return delta_arp, delta_varp, delta_range, result.fun, result
def _find_best_adjustable_parameters(
        struct: Union[SICDType, SIDDType1, SIDDType2],
        ecf_coords: numpy.ndarray,
        img_coords: numpy.ndarray,
        **minimization_args) -> Tuple[numpy.ndarray, numpy.ndarray, float, float, Any]:
    """
    Determine the projection model adjustable parameters (`'ECF'` frame) which
    best fit the geophysical coordinates to the observed image coordinates,
    using a full 3-d ARP offset, a 3-d VARP offset, and a range bias
    (7 parameters total).

    Parameters
    ----------
    struct : SICDType|SIDDType1|SIDDType2
    ecf_coords : numpy.ndarray
        The geophysical coordinates of shape `(N, 3)` for the identified features
    img_coords : numpy.ndarray
        The image coordinates of shape `(N, 2)` for the identified features
    minimization_args
        Keyword arguments passed through to :func:`scipy.optimize.minimize`,
        defaulting to `'Powell'` optimization.

    Returns
    -------
    delta_arp : numpy.ndarray
    delta_varp : numpy.ndarray
    delta_range : float
    residue : float
        The mean squared pixel residue at the optimum
    result
        The minimization result
    """

    def unpack(vec):
        # (delta arp, delta varp, range bias)
        return vec[0:3], vec[3:6], vec[6]

    def squared_residues(vec):
        arp_off, varp_off, range_off = unpack(vec)
        projected, _, _ = ground_to_image(
            ecf_coords, struct, max_iterations=100, use_structure_coa=False,
            delta_arp=arp_off, delta_varp=varp_off, range_bias=range_off,
            adj_params_frame='ECF')
        delta = (projected - img_coords)
        return numpy.sum(delta*delta, axis=1)

    def objective(vec):
        return numpy.mean(squared_residues(vec))

    minimization_args.setdefault('method', 'Powell')
    initial_guess = numpy.zeros((7,), dtype='float64')
    result = minimize(objective, initial_guess, **minimization_args)
    if not result.success:
        raise ValueError('Optimization failed {}'.format(result))
    logger.info(
        'best adjustable parameters residues [pix^2]\n{}'.format(
            squared_residues(result.x)))
    delta_arp, delta_varp, delta_range = unpack(result.x)
    return delta_arp, delta_varp, delta_range, result.fun, result
def find_best_adjustable_parameters(
        struct: Union[SICDType, SIDDType1, SIDDType2],
        ecf_coords: numpy.ndarray,
        img_coords: numpy.ndarray,
        **minimization_args) -> Tuple[numpy.ndarray, numpy.ndarray, float, float, Any]:
    """
    Public entry for determining the best projection model adjustable
    parameters (in the `'ECF'` coordinate frame) to fit the geophysical
    coordinate locations to the image coordinate locations.

    This delegates directly to the general private implementation.

    Parameters
    ----------
    struct : SICDType|SIDDType1|SIDDType2
    ecf_coords : numpy.ndarray
        The geophysical coordinates of shape `(N, 3)` for the identified features
    img_coords : numpy.ndarray
        The image coordinates of shape `(N, 2)` for the identified features
    minimization_args
        The keyword arguments (after `args` argument) passed through to
        :func:`scipy.optimize.minimize`. This will default to `'Powell'`
        optimization.

    Returns
    -------
    delta_arp : numpy.ndarray
    delta_varp : numpy.ndarray
    delta_range : float
    residue : float
        The mean squared pixel residue at the optimum
    result
        The minimization result
    """

    return _find_best_adjustable_parameters(
        struct, ecf_coords, img_coords, **minimization_args)
| 10,192 | 35.403571 | 103 | py |
sarpy | sarpy-master/sarpy/processing/sicd/subaperture.py | """
Sub-aperture processing methods.
"""
__author__ = 'Thomas McCullough'
__classification__ = "UNCLASSIFIED"
import logging
from typing import Union, Generator, Tuple, List, Optional, Sequence
import numpy
from scipy.constants import speed_of_light
from sarpy.processing.sicd.fft_base import FFTCalculator, fft, ifft, fftshift, fft2_sicd, ifft2_sicd
from sarpy.io.general.slice_parsing import validate_slice_int, verify_slice
from sarpy.processing.sicd.normalize_sicd import DeskewCalculator
from sarpy.processing.ortho_rectify import OrthorectificationHelper, OrthorectificationIterator
from sarpy.visualization.remap import RemapFunction
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
logger = logging.getLogger(__name__)
####################
# Module variables providing default values
_METHOD_VALUES = ('NORMAL', 'FULL', 'MINIMAL')
def frame_definition(
        array_size: int,
        frame_count: int = 9,
        aperture_fraction: float = 0.2,
        fill: Union[int, float] = 1,
        method: str = 'FULL') -> Tuple[List[Tuple[int, int]], int]:
    """
    Get the frame definition along the desired axis for subaperture processing.

    Parameters
    ----------
    array_size : int
        The size of the given array.
    frame_count : int
        The number of frames to calculate.
    aperture_fraction : float
        The relative size of each aperture window.
    fill : float|int
        The fft fill value.
    method : str
        The subaperture processing method, which must be one of
        `('NORMAL', 'FULL', 'MINIMAL')`.

    Returns
    -------
    frame_definition: List[Tuple[int, int]]
        The `(start, stop)` index pair for each frame.
    output_resolution: int
    """

    method = method.upper()
    if method not in _METHOD_VALUES:
        raise ValueError('method must be one of {}, got {}'.format(_METHOD_VALUES, method))
    fill = float(fill)
    if fill < 0.9999999:
        raise ValueError('fill must be at least 1.0, got {}'.format(fill))

    # shrink to the processing region implied by the fft fill factor
    nominal_size = array_size/fill
    margin = int(numpy.round(0.5*(array_size - nominal_size)))
    processing_size = array_size - 2*margin
    # the (static) size of every sub-aperture window
    window = int(numpy.ceil(aperture_fraction*processing_size))
    # the spacing between consecutive window starts
    stride = 0 if frame_count == 1 else \
        int(numpy.floor((processing_size - window)/float(frame_count-1)))

    if method == 'NORMAL':
        output_resolution = int(numpy.ceil(aperture_fraction*array_size))
    elif method == 'FULL':
        output_resolution = array_size
    elif method == 'MINIMAL':
        output_resolution = int(numpy.ceil(processing_size/float(frame_count)))
    else:
        # defensive - already excluded by the membership check above
        raise ValueError('Got unhandled method {}'.format(method))

    frames = [(margin + i*stride, margin + i*stride + window) for i in range(frame_count)]
    return frames, output_resolution
#####################################
# The sub-aperture processing methods
def _validate_input(array: numpy.ndarray) -> numpy.ndarray:
if not isinstance(array, numpy.ndarray):
raise TypeError('array must be a numpy array. Got type {}'.format(type(array)))
if not numpy.iscomplexobj(array):
raise ValueError('array must be a complex array, got dtype {}'.format(array.dtype))
if array.ndim != 2:
raise ValueError('array must be two dimensional. Got shape {}'.format(array.shape))
return array
def _validate_dimension(dimension: int) -> int:
dimension = int(dimension)
if dimension not in (0, 1):
raise ValueError('dimension must be 0 or 1, got {}'.format(dimension))
return dimension
def subaperture_processing_array(
        array: numpy.ndarray,
        aperture_indices: Tuple[int, int],
        output_resolution: int,
        dimension: int = 0) -> numpy.ndarray:
    """
    Perform sub-aperture processing directly on complex image data.

    The data is transformed to (shifted) phase history space along the given
    dimension, and the remainder of the work is delegated to
    :func:`subaperture_processing_phase_history`.

    Parameters
    ----------
    array : numpy.ndarray
        The complex array data. Dimension other than 2 is not supported.
    aperture_indices : Tuple[int, int]
        The start/stop indices for the subaperture processing.
    output_resolution : int
        The output resolution parameter.
    dimension : int
        The dimension (0 or 1) along which to perform the sub-aperture
        processing.

    Returns
    -------
    numpy.ndarray
    """

    checked = _validate_input(array)
    axis = _validate_dimension(dimension)
    phase_history = fftshift(fft(checked, axis=axis), axes=axis)
    return subaperture_processing_phase_history(
        phase_history, aperture_indices, output_resolution, dimension=axis)
def subaperture_processing_phase_history(
        phase_array: numpy.ndarray,
        aperture_indices: Tuple[int, int],
        output_resolution: int,
        dimension: int = 0) -> numpy.ndarray:
    """
    Perform the sub-aperture processing on the given complex phase history data.

    Parameters
    ----------
    phase_array : numpy.ndarray
        The complex array data. Dimension other than 2 is not supported.
    aperture_indices : Tuple[int, int]
        The start/stop indices for the subaperture processing.
    output_resolution : int
        The output resolution parameter.
    dimension : int
        The dimension (0 or 1) along which to perform the sub-aperture
        processing.

    Returns
    -------
    numpy.ndarray
    """

    phase_array = _validate_input(phase_array)
    dimension = _validate_dimension(dimension)
    start, stop = aperture_indices
    # select the aperture window along the processing dimension, and
    # inverse transform at the requested output resolution
    if dimension == 0:
        selection = phase_array[start:stop, :]
    else:
        selection = phase_array[:, start:stop]
    return ifft(selection, axis=dimension, n=output_resolution)
class SubapertureCalculator(FFTCalculator):
    """
    Class for performing sub-aperture processing from a reader instance.

    It is important to note that full resolution is required for along the
    processing dimension, so sub-sampling along the processing dimension does
    not decrease the amount of data which must be fetched.
    """

    __slots__ = ('_frame_count', '_aperture_fraction', '_method', '_frame_definition')

    def __init__(
            self,
            reader: Union[str, SICDTypeReader],
            dimension: int = 0,
            index: int = 0,
            block_size: int = 10,
            frame_count: int = 9,
            aperture_fraction: float = 0.2,
            method: str = 'FULL'):
        """
        Parameters
        ----------
        reader : str|SICDTypeReader
            Input file path or reader object, which must be of sicd type.
        dimension : int
            The dimension over which to split the sub-aperture.
        index : int
            The sicd index to use.
        block_size : int
            The approximate processing block size to fetch, given in MB. The
            minimum value for use here will be 1.
        frame_count : int
            The number of frames to calculate.
        aperture_fraction : float
            The relative size of each aperture window.
        method : str
            The subaperture processing method, which must be one of
            `('NORMAL', 'FULL', 'MINIMAL')`.
        """

        # defaults, overwritten through the property setters below
        self._frame_count = 9
        self._aperture_fraction = 0.2
        self._method = 'FULL'
        self._frame_definition = None
        super(SubapertureCalculator, self).__init__(
            reader, dimension=dimension, index=index, block_size=block_size)
        self.frame_count = frame_count
        self.aperture_fraction = aperture_fraction
        self.method = method

    @property
    def frame_count(self) -> int:
        """
        int: The frame count.
        """

        return self._frame_count

    @frame_count.setter
    def frame_count(self, value):
        value = int(value)
        if value < 1:
            raise ValueError('frame_count must be a positive integer.')
        self._frame_count = value

    @property
    def aperture_fraction(self) -> float:
        """
        float: The relative aperture fraction size.
        """

        return self._aperture_fraction

    @aperture_fraction.setter
    def aperture_fraction(self, value):
        value = float(value)
        if not (0 < value < 1):
            raise ValueError(
                'aperture_fraction must be in the range (0, 1), got {}'.format(value))
        self._aperture_fraction = value

    @property
    def method(self) -> str:
        """
        str: The subaperture method.
        """

        return self._method

    @method.setter
    def method(self, value):
        value = value.upper()
        if value not in _METHOD_VALUES:
            raise ValueError('method must be one of {}, got {}'.format(_METHOD_VALUES, value))
        self._method = value

    def _parse_frame_argument(self, the_frame):
        # normalize the frame specification to either a single int or a
        # one-dimensional integer array of valid (non-negative) frame indices
        if the_frame is None:
            return numpy.arange(self.frame_count)
        elif isinstance(the_frame, slice):
            the_frame = verify_slice(the_frame, self.frame_count)
            return numpy.arange(the_frame.start, the_frame.stop, the_frame.step)
        elif isinstance(the_frame, int):
            return validate_slice_int(the_frame, self.frame_count)
        elif isinstance(the_frame, (list, tuple)):
            return self._parse_frame_argument(numpy.array(the_frame))
        elif isinstance(the_frame, numpy.ndarray):
            if not issubclass(the_frame.dtype.type, numpy.integer):
                raise ValueError(
                    'The last slice dimension was a numpy array of unsupported non-integer '
                    'dtype {}'.format(the_frame.dtype))
            if the_frame.ndim != 1:
                raise ValueError(
                    'The last slice dimension was a numpy array which was not one-dimensional, '
                    'which is of unsupported.')
            out = the_frame.copy()
            for i, entry in enumerate(out):
                if (entry <= -self.frame_count) or (entry >= self.frame_count):
                    raise ValueError(
                        'The last slice dimension was a numpy array, and entry {} has '
                        'value {}, which is not sensible for the '
                        'bound {}'.format(i, entry, self.frame_count))
                if entry < 0:
                    # wrap negative frame indices
                    out[i] += self.frame_count
            return out
        else:
            raise TypeError(
                'The final slice dimension is of unsupported type {}'.format(type(the_frame)))

    def _parse_slicing(self, item) -> Tuple[slice, slice, Optional[int]]:
        row_range, col_range, the_frame = super(SubapertureCalculator, self)._parse_slicing(item)
        return row_range, col_range, self._parse_frame_argument(the_frame)

    def subaperture_generator(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]],
            frames: Union[None, int, list, tuple, numpy.ndarray] = None) -> Generator[numpy.ndarray, None, None]:
        """
        Supplies a generator for the given row and column ranges and frames collection.
        **Note that this IGNORES the block_size parameter in fetching, and fetches the
        entire required block.**

        The full resolution data in the processing dimension is required, even if
        down-sampled by the row_range or col_range parameter.

        Parameters
        ----------
        row_range : slice|Tuple[int, int, int]
            The row range.
        col_range : slice|Tuple[int, int, int]
            The column range.
        frames : None|int|list|tuple|numpy.ndarray
            The frame or frame collection.

        Returns
        -------
        Generator[numpy.ndarray]
        """

        def get_dimension_details(
                the_range: Union[slice, Tuple[int, int, int]]) -> Tuple[Tuple[int, int, int], int, int]:
            # full resolution is required along the processing dimension, so
            # fetch with step +/-1 and apply any decimation after the fact
            if isinstance(the_range, Sequence):
                start, stop, step = the_range
            elif isinstance(the_range, slice):
                start = the_range.start
                stop = the_range.stop
                step = the_range.step
            else:
                raise TypeError('Got unexpected range input {}'.format(the_range))
            the_snip = -1 if step < 0 else 1
            t_full_range = (start, stop, the_snip)
            t_full_size = stop - start
            t_step = abs(step)
            return t_full_range, t_full_size, t_step

        if self._fill is None:
            raise ValueError('Unable to proceed unless the index and dimension are set.')
        frames = self._parse_frame_argument(frames)
        if isinstance(frames, int):
            frames = [frames, ]
        if isinstance(row_range, tuple):
            row_slice = slice(*row_range)
        else:
            row_slice = row_range
        if isinstance(col_range, tuple):
            col_slice = slice(*col_range)
        else:
            # fixed copy/paste bug - this previously fell back to row_range
            col_slice = col_range

        if self.dimension == 0:
            # determine the full resolution block of data to fetch
            this_row_range, full_size, step = get_dimension_details(row_range)
            # fetch the necessary data block
            data = self.reader[(slice(*this_row_range), col_slice, self.index)]
        else:
            # determine the full resolution block of data to fetch
            this_col_range, full_size, step = get_dimension_details(col_range)
            # fetch the necessary data block
            data = self.reader[(row_slice, slice(*this_col_range), self.index)]
        # handle any nonsense data as 0
        data[~numpy.isfinite(data)] = 0
        # transform the data to phase space
        data = fftshift(fft(data, axis=self.dimension), axes=self.dimension)
        # define our frame collection
        frame_collection, output_resolution = frame_definition(
            full_size, frame_count=self.frame_count, aperture_fraction=self.aperture_fraction,
            fill=self.fill, method=self.method)
        # iterate over frames and generate the results
        for frame_index in frames:
            frame_def = frame_collection[int(frame_index)]
            this_subap_data = subaperture_processing_phase_history(
                data, frame_def, output_resolution=output_resolution, dimension=self.dimension)
            # apply any residual decimation along the processing dimension
            if step == 1:
                yield this_subap_data
            elif self.dimension == 0:
                yield this_subap_data[::step, :]
            else:
                yield this_subap_data[:, ::step]

    def _prepare_output(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]],
            frames: Union[None, int, list, tuple, numpy.ndarray] = None) -> numpy.ndarray:
        # allocate the output array - 2-d for a single frame, 3-d otherwise.
        # NB: a stop of None indicates a negative step proceeding through
        # index 0, which requires the -1 sentinel below for counting
        row_start, row_stop, row_step = row_range if isinstance(row_range, tuple) else \
            (row_range.start, row_range.stop, row_range.step)
        if row_stop is None:
            if row_step > 0:
                raise ValueError('Got unexpected row_range {}'.format(row_range))
            row_stop = -1
        col_start, col_stop, col_step = col_range if isinstance(col_range, tuple) else \
            (col_range.start, col_range.stop, col_range.step)
        if col_stop is None:
            if col_step > 0:
                raise ValueError('Got unexpected col_range {}'.format(col_range))
            col_stop = -1
        row_count = int((row_stop - row_start)/float(row_step))
        col_count = int((col_stop - col_start)/float(col_step))
        if frames is None or len(frames) == 1:
            out_size = (row_count, col_count)
        else:
            out_size = (row_count, col_count, len(frames))
        return numpy.zeros(out_size, dtype=numpy.complex64)

    def __getitem__(self, item) -> numpy.ndarray:
        """
        Fetches the csi data based on the input slice. Slicing in the final
        dimension using an integer, slice, or integer array is supported. Note
        that this could easily be memory intensive, and should be used with
        some care.

        Parameters
        ----------
        item

        Returns
        -------
        numpy.ndarray
        """

        if self._fill is None:
            raise ValueError('Unable to proceed unless the index and dimension are set.')

        # parse the slicing to ensure consistent structure
        row_range, col_range, frames = self._parse_slicing(item)
        if isinstance(frames, int):
            frames = [frames, ]

        if self.dimension == 0:
            column_block_size = self.get_fetch_block_size(row_range.start, row_range.stop)
            # get our block definitions
            column_blocks, result_blocks = self.extract_blocks(col_range, column_block_size)
            # NOTE(review): `column_blocks == 1` compares the block collection
            # itself to the integer 1 - confirm against extract_blocks whether
            # `len(column_blocks) == 1` was intended
            if column_blocks == 1 and len(frames) == 1:
                # no need to prepare output, which will take twice the memory, so just return
                out = self.subaperture_generator(row_range, col_range, frames).__next__()
            else:
                out = self._prepare_output(row_range, col_range, frames=frames)
                # noinspection PyTypeChecker
                for this_column_range, result_range in zip(column_blocks, result_blocks):
                    generator = self.subaperture_generator(row_range, this_column_range, frames)
                    if len(frames) == 1:
                        out[:, result_range[0]:result_range[1]] = generator.__next__()
                    else:
                        for i, data in enumerate(generator):
                            out[:, result_range[0]:result_range[1], i] = data
        else:
            row_block_size = self.get_fetch_block_size(col_range.start, col_range.stop)
            # get our block definitions
            row_blocks, result_blocks = self.extract_blocks(row_range, row_block_size)
            # NOTE(review): same `row_blocks == 1` concern as above
            if row_blocks == 1 and len(frames) == 1:
                out = self.subaperture_generator(row_range, col_range, frames).__next__()
            else:
                out = self._prepare_output(row_range, col_range, frames=frames)
                # noinspection PyTypeChecker
                for this_row_range, result_range in zip(row_blocks, result_blocks):
                    generator = self.subaperture_generator(this_row_range, col_range, frames)
                    if len(frames) == 1:
                        out[result_range[0]:result_range[1], :] = generator.__next__()
                    else:
                        for i, data in enumerate(generator):
                            out[result_range[0]:result_range[1], :, i] = data
        return out
class SubapertureOrthoIterator(OrthorectificationIterator):
"""
An iterator class for the ortho-rectified subaperture processing.
Iterating depth first requires the least fetching from the reader once for
all frames. Otherwise, iterating requires redundantly fetching data once
for each frame.
It should be noted that fetching data is not time intensive if working using
a local file (i.e. on your computer), but it may be if working using some
kind of network file system.
"""
__slots__ = ('_depth_first', '_this_frame', '_generator')
def __init__(
self,
ortho_helper: OrthorectificationHelper,
calculator: SubapertureCalculator,
bounds: Union[None, numpy.ndarray, tuple, list] = None,
remap_function: Optional[RemapFunction] = None,
recalc_remap_globals: bool = False,
depth_first: bool = True):
"""
Parameters
----------
ortho_helper : OrthorectificationHelper
calculator : SubapertureCalculator
bounds : None|numpy.ndarray|list|tuple
The pixel bounds of the form `(min row, max row, min col, max col)`.
This will default to the full image.
remap_function : None|RemapFunction
The remap function to apply, if desired.
recalc_remap_globals : bool
Only applies if a remap function is provided, should we recalculate
any required global parameters? This will automatically happen if
they are not already set.
depth_first : bool
If `True`, by image segment part then frame - this requires the least
fetching from the reader, once across all frames. Otherwise, iteration
will proceed by frames and then image segment - this requires more
fetching from the reader, once per frame.
"""
self._generator = None
self._this_frame = None
self._depth_first = bool(depth_first)
if not isinstance(calculator, SubapertureCalculator):
raise TypeError(
'calculator must be an instance of SubapertureCalculator. \n\t'
'Got type {}'.format(type(calculator)))
super(SubapertureOrthoIterator, self).__init__(
ortho_helper, calculator=calculator, bounds=bounds,
remap_function=remap_function, recalc_remap_globals=recalc_remap_globals)
@property
def calculator(self) -> SubapertureCalculator:
# noinspection PyTypeChecker
return self._calculator
def _depth_first_iteration(self) -> Tuple[numpy.ndarray, Tuple[int, int], int]:
if not self._depth_first:
raise ValueError('Requires depth_first = True')
# determine our current state
if self._this_index is None or self._this_frame is None:
self._this_index = 0
self._this_frame = 0
else:
self._this_frame += 1
if self._this_frame >= self.calculator.frame_count:
self._this_index += 1
self._this_frame = 0
# at this point, _this_index & _this_frame indicates which entry to return
if self._this_index >= len(self._iteration_blocks):
self._this_index = None # reset the iteration scheme
self._this_frame = None
raise StopIteration()
this_ortho_bounds, this_pixel_bounds = self._get_state_parameters()
# accommodate for real pixel limits
this_pixel_bounds = self._ortho_helper.get_real_pixel_bounds(this_pixel_bounds)
if self._this_frame == 0:
# set up the iterator from the calculator
self._generator = self.calculator.subaperture_generator(
(this_pixel_bounds[0], this_pixel_bounds[1], 1),
(this_pixel_bounds[2], this_pixel_bounds[3], 1))
logger.info(
'Fetching orthorectified coordinate block ({}:{}, {}:{}) of ({}:{}) for frame {}'.format(
this_ortho_bounds[0] - self.ortho_bounds[0], this_ortho_bounds[1] - self.ortho_bounds[0],
this_ortho_bounds[2] - self.ortho_bounds[2], this_ortho_bounds[3] - self.ortho_bounds[2],
self.ortho_bounds[1] - self.ortho_bounds[0], self.ortho_bounds[3] - self.ortho_bounds[2],
self._this_frame))
data = self._generator.__next__()
ortho_data = self._get_orthorectified_version(this_ortho_bounds, this_pixel_bounds, data)
start_indices = (this_ortho_bounds[0] - self.ortho_bounds[0],
this_ortho_bounds[2] - self.ortho_bounds[2])
return ortho_data, start_indices, self._this_frame
    def _frame_first_iteration(self) -> Tuple[numpy.ndarray, Tuple[int, int], int]:
        """
        Advance the iteration in frame-first order - every spatial block for the
        current frame is produced before moving on to the next frame.

        Returns
        -------
        data : numpy.ndarray
            The orthorectified data for the current block/frame.
        start_indices : Tuple[int, int]
            The normalized (start_row, start_col) indices of this block, relative
            to the overall output shape.
        frame : int
            The frame index.

        Raises
        ------
        StopIteration
            When all frames have been exhausted.
        """

        if self._depth_first:
            raise ValueError('Requires depth_first = False')
        # determine our current state
        if self._this_index is None or self._this_frame is None:
            self._this_index = 0
            self._this_frame = 0
        else:
            self._this_index += 1
            if self._this_index >= len(self._iteration_blocks):
                # finished all blocks for this frame, move to the next frame
                self._this_frame += 1
                self._this_index = 0
        # at this point, _this_index & _this_frame indicates which entry to return
        if self._this_frame >= self.calculator.frame_count:
            raise StopIteration()

        # calculate our result
        this_ortho_bounds, this_pixel_bounds = self._get_state_parameters()
        # accommodate for real pixel limits
        this_pixel_bounds = self._ortho_helper.get_real_pixel_bounds(this_pixel_bounds)
        logger.info(
            'Fetching orthorectified coordinate block ({}:{}, {}:{}) of ({}:{}) for frame {}'.format(
                this_ortho_bounds[0] - self.ortho_bounds[0], this_ortho_bounds[1] - self.ortho_bounds[0],
                this_ortho_bounds[2] - self.ortho_bounds[2], this_ortho_bounds[3] - self.ortho_bounds[2],
                self.ortho_bounds[1] - self.ortho_bounds[0], self.ortho_bounds[3] - self.ortho_bounds[2],
                self._this_frame))
        data = self.calculator[
               this_pixel_bounds[0]:this_pixel_bounds[1],
               this_pixel_bounds[2]:this_pixel_bounds[3],
               self._this_frame]
        ortho_data = self._get_orthorectified_version(this_ortho_bounds, this_pixel_bounds, data)
        start_indices = (this_ortho_bounds[0] - self.ortho_bounds[0],
                         this_ortho_bounds[2] - self.ortho_bounds[2])
        return ortho_data, start_indices, self._this_frame
def __next__(self) -> Tuple[numpy.ndarray, Tuple[int, int], int]:
"""
Get the next iteration of ortho-rectified data.
Returns
-------
data: numpy.ndarray
normalized_indices: Tuple[int, int]
The (normalized) indices (start_row, start_col) for this section of
data, relative to overall output shape
frame: int
The frame index.
"""
# NB: this is the Python 3 pattern for iteration
if self._depth_first:
return self._depth_first_iteration()
else:
return self._frame_first_iteration()
def next(self) -> Tuple[numpy.ndarray, Tuple[int, int], int]:
"""
Get the next iteration of ortho-rectified data.
Returns
-------
data: numpy.ndarray
normalized_indices: Tuple[int, int]
The (normalized) indices (start_row, start_col) for this section of
data, relative to overall output shape
frame: int
The frame index.
"""
# NB: this is the Python 2 pattern for iteration
return self.__next__()
class ApertureFilter(object):
    """
    This is a calculator for filtering SAR imagery using a subregion of complex
    fft data over a full resolution subregion of the original SAR data.
    This is largely intended as a helper function for the aperture GUI tool,
    but is included here because it may have wider applicability.
    """

    __slots__ = (
        '_deskew_calculator', '_sub_image_bounds', '_normalized_phase_history')

    def __init__(
            self,
            reader: SICDTypeReader,
            dimension: int = 1,
            index: int = 0,
            apply_deskew: bool = True,
            apply_deweighting: bool = False):
        """
        Parameters
        ----------
        reader : SICDTypeReader
        dimension : int
        index : int
        apply_deskew : bool
        apply_deweighting : bool
        """
        # phase history is only populated once sub-image bounds are set
        self._normalized_phase_history = None
        self._deskew_calculator = DeskewCalculator(
            reader, dimension=dimension, index=index, apply_deskew=apply_deskew, apply_deweighting=apply_deweighting)
        self._sub_image_bounds = None

    @property
    def apply_deskew(self) -> bool:
        """
        bool: Apply deskew to calculated value.
        """
        return self._deskew_calculator.apply_deskew

    @apply_deskew.setter
    def apply_deskew(self, value):
        self._deskew_calculator.apply_deskew = value
        # changing deskew state invalidates the cached phase history
        self._set_normalized_phase_history()

    @property
    def apply_deweighting(self) -> bool:
        """
        bool: Apply deweighting to calculated values.
        """
        return self._deskew_calculator.apply_deweighting

    @apply_deweighting.setter
    def apply_deweighting(self, val):
        self._deskew_calculator.apply_deweighting = val
        # changing deweighting state invalidates the cached phase history
        self._set_normalized_phase_history()

    def _get_fft_complex_data(self, cdata: numpy.ndarray) -> numpy.ndarray:
        """
        Transform the complex image data to phase history data.

        Parameters
        ----------
        cdata : numpy.ndarray

        Returns
        -------
        numpy.ndarray
        """
        return fftshift(fft2_sicd(cdata, self.sicd))

    def _get_fft_phase_data(self, ph_data: numpy.ndarray) -> numpy.ndarray:
        """
        Transforms the phase history data to complex image data.

        Parameters
        ----------
        ph_data : numpy.ndarray

        Returns
        -------
        numpy.ndarray
        """
        return ifft2_sicd(ph_data, self.sicd)

    @property
    def sicd(self) -> SICDType:
        """
        SICDType: The associated SICD structure.
        """
        return self._deskew_calculator.sicd

    @property
    def dimension(self) -> int:
        """
        int: The processing dimension.
        """
        return self._deskew_calculator.dimension

    @property
    def data_size(self) -> Optional[Tuple[int, int]]:
        """
        None|(int, int): The feasible data size
        """
        if self._sub_image_bounds is None:
            return None
        row_bounds, col_bounds = self._sub_image_bounds
        return int(row_bounds[1] - row_bounds[0]), int(col_bounds[1] - col_bounds[0])

    @dimension.setter
    def dimension(self, val):
        """
        Parameters
        ----------
        val : int

        Returns
        -------
        None
        """
        self._deskew_calculator.dimension = val
        # changing dimension invalidates the cached phase history
        self._set_normalized_phase_history()

    @property
    def flip_x_axis(self) -> bool:
        """
        bool: Is this left-looking (SideOfTrack == "L") collection? `False` when
        the state cannot be determined from the SICD structure.
        """
        try:
            return self.sicd.SCPCOA.SideOfTrack == "L"
        except AttributeError:
            return False

    @property
    def sub_image_bounds(self) -> Tuple[Tuple[int, int], ...]:
        """
        Tuple[Tuple[int, int], ...]: The sub-image bounds used for processing.
        """
        return self._sub_image_bounds

    def set_sub_image_bounds(
            self,
            row_bounds: Optional[Tuple[int, int]],
            col_bounds: Optional[Tuple[int, int]]):
        """
        Sets the full range bounds for the phase history calculation. This subsequently
        sets the `normalized_phase_history` value.

        Parameters
        ----------
        row_bounds : None|Tuple[int, int]
            Of the form `(start row, end row)`.
        col_bounds : None|Tuple[int, int]
            Of the form `(start column, end column)`.

        Returns
        -------
        None
        """

        def validate_entry(entry):
            # ensure a strictly increasing integer pair
            if len(entry) != 2:
                raise ValueError('Bounds must have length 2. Got {}'.format(entry))
            out = (int(entry[0]), int(entry[1]))
            if out[0] >= out[1]:
                raise ValueError('Bounds must have int(bound[0]) < int(bound[1]). Got {}'.format(entry))
            return out

        if row_bounds is None or col_bounds is None:
            self._sub_image_bounds = None
        else:
            self._sub_image_bounds = (validate_entry(row_bounds), validate_entry(col_bounds))
        self._set_normalized_phase_history()

    @property
    def normalized_phase_history(self) -> Optional[numpy.ndarray]:
        """
        None|numpy.ndarray: The normalized phase history
        """
        return self._normalized_phase_history

    def _set_normalized_phase_history(self) -> None:
        """
        Sets the normalized phase history.

        Returns
        -------
        None
        """

        if self._sub_image_bounds is None:
            self._normalized_phase_history = None
            return

        row_bounds, col_bounds = self._sub_image_bounds
        underlying_size = self._deskew_calculator.data_size
        if row_bounds[0] < 0 or row_bounds[1] > underlying_size[0]:
            raise ValueError(
                'Desired row_bounds given as {}, and underlying data size is {}'.format(row_bounds, underlying_size))
        if col_bounds[0] < 0 or col_bounds[1] > underlying_size[1]:
            raise ValueError(
                'Desired col_bounds given as {}, and underlying data size is {}'.format(col_bounds, underlying_size))
        # fetch the deskewed/deweighted image chip, then transform to phase history
        deskewed_data = self._deskew_calculator[row_bounds[0]:row_bounds[1], col_bounds[0]:col_bounds[1]]
        self._normalized_phase_history = self._get_fft_complex_data(deskewed_data)

    @property
    def polar_angles(self) -> numpy.ndarray:
        """
        numpy.ndarray: The polar angle values (in degrees) across the column
        dimension of the phase history, derived from the Grid parameters.
        """
        angle_width = (1 / self.sicd.Grid.Col.SS) / self.sicd.Grid.Row.KCtr
        if self.sicd.Grid.Col.KCtr:
            angle_ctr = self.sicd.Grid.Col.KCtr
        else:
            angle_ctr = 0
        angle_limits = angle_ctr + numpy.array([-1, 1]) * angle_width / 2
        if self.flip_x_axis:
            # left-looking collection - reverse the angle orientation
            angle_limits = angle_limits[1], angle_limits[0]
        angles = numpy.linspace(angle_limits[0], angle_limits[1], self.normalized_phase_history.shape[1])
        return numpy.rad2deg(numpy.arctan(angles))

    @property
    def frequencies(self) -> numpy.ndarray:
        """
        This returns the subaperture frequencies in units of GHz

        Returns
        -------
        numpy.array
        """
        freq_width = (1 / self.sicd.Grid.Row.SS) * (speed_of_light / 2)
        freq_ctr = self.sicd.Grid.Row.KCtr * (speed_of_light / 2)
        freq_limits = freq_ctr + (numpy.array([-1, 1]) * freq_width / 2)
        if self.sicd.PFA:
            freq_limits = freq_limits / self.sicd.PFA.SpatialFreqSFPoly[0]
        freq_limits = freq_limits/1e9
        # NB: limits are deliberately traversed high-to-low here
        frequencies = numpy.linspace(freq_limits[1], freq_limits[0], self.normalized_phase_history.shape[0])
        return frequencies

    def __getitem__(self, item) -> Optional[numpy.ndarray]:
        """
        Get the filtered image chip for the given phase history sub-region.
        Returns `None` if the phase history is not populated.
        """
        if self.normalized_phase_history is None:
            return None
        # zero out everything outside the selected aperture region
        filtered_cdata = numpy.zeros(self.normalized_phase_history.shape, dtype='complex64')
        filtered_cdata[item] = self.normalized_phase_history[item]
        # do the inverse transform of this sampled portion
        return self._get_fft_phase_data(filtered_cdata)
| 34,669 | 36.400216 | 117 | py |
sarpy | sarpy-master/sarpy/processing/sicd/normalize_sicd.py | """
Methods for transforming SICD data to a common state.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from tempfile import mkstemp
import os
from typing import Dict, Tuple, Optional, Union
import numpy
from numpy.polynomial import polynomial
from numpy.random import randn
from scipy.signal import resample
from sarpy.io.general.base import SarpyIOError
from sarpy.processing.ortho_rectify import FullResolutionFetcher
from sarpy.processing.sicd.fft_base import fft, ifft, fftshift, ifftshift, \
fft_sicd, ifft_sicd
from sarpy.io.complex.base import FlatSICDReader, SICDTypeReader
from sarpy.io.complex.converter import open_complex
from sarpy.io.complex.sicd import SICDWriter
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.Grid import WgtTypeType
logger = logging.getLogger(__name__)
##################
# helper functions
def apply_skew_poly(
        input_data: numpy.ndarray,
        delta_kcoa_poly: numpy.ndarray,
        row_array: numpy.ndarray,
        col_array: numpy.ndarray,
        fft_sgn: int,
        dimension: int,
        forward: bool = False) -> numpy.ndarray:
    """
    Apply the skew (or deskew) operation to the complex array, as determined by
    the provided delta kcoa polynomial.

    Parameters
    ----------
    input_data : numpy.ndarray
        The input data.
    delta_kcoa_poly : numpy.ndarray
        The delta kcoa polynomial to use.
    row_array : numpy.ndarray
        The row array, should agree with input_data first dimension definition.
    col_array : numpy.ndarray
        The column array, should agree with input_data second dimension definition.
    fft_sgn : int
        The fft sign to use.
    dimension : int
        The dimension to apply along.
    forward : bool
        If True, this shifts forward (i.e. skews), otherwise applies in inverse
        (i.e. deskew) direction.

    Returns
    -------
    numpy.ndarray
    """

    if numpy.all(delta_kcoa_poly == 0):
        # trivial polynomial - nothing to apply
        return input_data

    # integrate along the given dimension to obtain the phase polynomial
    integrated_poly = polynomial.polyint(delta_kcoa_poly, axis=dimension)
    # the forward (skew) direction simply flips the fft sign
    sign = -fft_sgn if forward else fft_sgn
    phase = polynomial.polygrid2d(row_array, col_array, integrated_poly)
    return input_data*numpy.exp(1j*sign*2*numpy.pi*phase)
def determine_weight_array(
        input_data_shape: Tuple[int, ...],
        weight_array: numpy.ndarray,
        oversample_rate: Union[int, float],
        dimension: int) -> Tuple[numpy.ndarray, int, int]:
    """
    Determine the appropriate resampled weight array and bounds.

    Parameters
    ----------
    input_data_shape : tuple
        The shape of the input data, which should be a two element tuple.
    weight_array : numpy.ndarray
        The one-dimensional weight array.
    oversample_rate : int|float
        The oversample rate along the given dimension.
    dimension : int
        The dimension along which the weighting applies.

    Returns
    -------
    weight_array : numpy.ndarray
        The weight array assuming nominal sampling. In the presence of
        oversampling, this is shorter than relevant dimension of the actual data.
    start_index : int
        The starting index along the given dimension to which to apply weight.
        This will be `0` if not over-sampled.
    end_index : int
        The (non-inclusive) final index along the given dimension to apply
        weight. This will be `input_data_shape[dimension]` if not oversampled.

    Raises
    ------
    ValueError
        If `weight_array` is not a one-dimensional numpy array.
    """

    # NB: a second (unreachable) ndim check previously duplicated this validation
    if not (isinstance(weight_array, numpy.ndarray) and weight_array.ndim == 1):
        raise ValueError('The weight array must be one-dimensional')

    # the nominal (non-oversampled) support size along the dimension
    weight_size = round(input_data_shape[dimension]/oversample_rate)
    # center the weighting window inside the data extent
    weight_ind_start = int(numpy.floor(0.5*(input_data_shape[dimension] - weight_size)))
    weight_ind_end = weight_ind_start + weight_size

    if weight_array.size == weight_size:
        return weight_array, weight_ind_start, weight_ind_end
    else:
        # resample the provided weighting to the nominal support size
        return resample(weight_array, weight_size), weight_ind_start, weight_ind_end
def apply_weight_array(
        input_data: numpy.ndarray,
        weight_array: numpy.ndarray,
        oversample_rate: Union[int, float],
        dimension: int,
        inverse: bool = False) -> numpy.ndarray:
    """
    Apply the weight array along the given dimension.

    Parameters
    ----------
    input_data : numpy.ndarray
        The complex data array to weight.
    weight_array : numpy.ndarray
        The weight array.
    oversample_rate : int|float
        The oversample rate.
    dimension : int
        Along which dimension to apply the weighting? Must be one of `{0, 1}`.
    inverse : bool
        If `True`, this divides the weight (i.e. de-weight), otherwise it multiplies.

    Returns
    -------
    numpy.ndarray
    """

    if not (isinstance(input_data, numpy.ndarray) and input_data.ndim == 2):
        raise ValueError('The data array must be two-dimensional')
    if weight_array is None:
        # nothing to be done
        return input_data

    weights, start_ind, stop_ind = determine_weight_array(
        input_data.shape, weight_array, oversample_rate, dimension)
    if inverse and numpy.any(weights == 0):
        raise ValueError('inverse=True and the weight array contains some zero entries.')

    # transform into the spectral domain, apply the weighting there, and transform back
    spectrum = fftshift(fft(input_data, axis=dimension), axes=dimension)
    if dimension == 0:
        column_weights = weights[:, numpy.newaxis]
        if inverse:
            spectrum[start_ind:stop_ind, :] /= column_weights
        else:
            spectrum[start_ind:stop_ind, :] *= column_weights
    else:
        if inverse:
            spectrum[:, start_ind:stop_ind] /= weights
        else:
            spectrum[:, start_ind:stop_ind] *= weights
    return ifft(ifftshift(spectrum, axes=dimension), axis=dimension)
def _add_poly(poly1: numpy.ndarray, poly2: numpy.ndarray) -> numpy.ndarray:
"""
Add two-dimensional polynomials together.
Parameters
----------
poly1 : numpy.ndarray
poly2 : numpy.ndarray
Returns
-------
numpy.ndarray
"""
if not isinstance(poly1, numpy.ndarray) and poly1.ndim == 2:
raise TypeError('poly1 must be a two-dimensional numpy array.')
if not isinstance(poly2, numpy.ndarray) and poly2.ndim == 2:
raise TypeError('poly2 must be a two-dimensional numpy array.')
out = numpy.zeros((max(poly1.shape[0], poly2.shape[0]), max(poly1.shape[1], poly2.shape[1])), dtype='float64')
out[:poly1.shape[0], :poly1.shape[1]] += poly1
out[:poly2.shape[0], :poly2.shape[1]] += poly2
return out
def _get_deskew_params(
        the_sicd: SICDType,
        dimension: int) -> Tuple[numpy.ndarray, int]:
    """
    Gets the basic deskew parameters.

    Parameters
    ----------
    the_sicd : SICDType
    dimension : int

    Returns
    -------
    delta_kcoa_poly: numpy.ndarray
    fft_sign : int
    """

    # defaults, used whenever the sicd structure is incompletely populated
    delta_kcoa_poly = numpy.array([[0, ], ], dtype=numpy.float64)
    fft_sign = -1

    try:
        if dimension == 0:
            delta_kcoa_poly = the_sicd.Grid.Row.DeltaKCOAPoly.get_array(dtype='float64')
        else:
            delta_kcoa_poly = the_sicd.Grid.Col.DeltaKCOAPoly.get_array(dtype='float64')
    except (ValueError, AttributeError):
        pass

    try:
        fft_sign = the_sicd.Grid.Row.Sgn if dimension == 0 else the_sicd.Grid.Col.Sgn
    except (ValueError, AttributeError):
        pass

    return delta_kcoa_poly, fft_sign
##########
# sicd state checking functions
def is_not_skewed(sicd: SICDType, dimension: int) -> bool:
    """
    Check if the sicd structure is not skewed along the provided dimension.

    Parameters
    ----------
    sicd : SICDType
    dimension : int

    Returns
    -------
    bool
    """

    # resolve the relevant direction parameters, if populated
    dir_param = None
    if sicd.Grid is not None:
        dir_param = sicd.Grid.Row if dimension == 0 else sicd.Grid.Col
    # absent a delta kcoa polynomial, the data is trivially unskewed
    if dir_param is None or dir_param.DeltaKCOAPoly is None:
        return True
    return numpy.all(dir_param.DeltaKCOAPoly.get_array(dtype='float64') == 0)
def is_uniform_weight(sicd: SICDType, dimension: int) -> bool:
    """
    Check if the sicd structure is has uniform weight along the provided dimension.

    Parameters
    ----------
    sicd : SICDType
    dimension : int

    Returns
    -------
    bool
    """

    # resolve the relevant direction parameters
    if sicd.Grid is None:
        return False
    dir_param = sicd.Grid.Row if dimension == 0 else sicd.Grid.Col
    if dir_param is None:
        return False

    # an explicit UNIFORM window name decides the question
    if dir_param.WgtType is not None and dir_param.WgtType.WindowName == 'UNIFORM':
        return True
    # otherwise inspect the weight function values, if present
    if dir_param.WgtFunct is not None:
        return numpy.all(dir_param.WgtFunct == dir_param.WgtFunct[0])
    return True
def is_normalized(sicd: SICDType, dimension: int = 1) -> bool:
    """
    Check if the sicd structure is normalized along the provided dimension.

    Parameters
    ----------
    sicd : SICDType
        The SICD structure.
    dimension : int
        The dimension to test.

    Returns
    -------
    bool
        normalization state in the given dimension
    """

    def negative_fft_sign() -> bool:
        # an absent Sgn is treated as the nominal -1 convention
        if dimension == 0:
            if sicd.Grid is None or sicd.Grid.Row is None or sicd.Grid.Row.Sgn is None:
                return True
            return sicd.Grid.Row.Sgn == -1
        if sicd.Grid is None or sicd.Grid.Col is None or sicd.Grid.Col.Sgn is None:
            return True
        return sicd.Grid.Col.Sgn == -1

    dimension = int(dimension)
    if dimension not in [0, 1]:
        raise ValueError('dimension must be either 0 or 1, got {}'.format(dimension))

    # normalized = unskewed, uniformly weighted, with nominal fft sign
    return is_not_skewed(sicd, dimension) and \
        is_uniform_weight(sicd, dimension) and \
        negative_fft_sign()
###########
# calculator class, intended mainly for use in aperture tool
class DeskewCalculator(FullResolutionFetcher):
    """
    This is a calculator for deskewing/deweighting which requires full resolution
    in both dimensions.
    """

    __slots__ = (
        '_apply_deskew', '_apply_deweighting', '_apply_off_axis', '_delta_kcoa_poly_axis', '_delta_kcoa_poly_off_axis',
        '_row_fft_sgn', '_col_fft_sgn',
        '_row_shift', '_row_mult', '_col_shift', '_col_mult',
        '_row_weight', '_row_pad', '_col_weight', '_col_pad',
        '_is_normalized', '_is_not_skewed_row', '_is_not_skewed_col',
        '_is_uniform_weight_row', '_is_uniform_weight_col', )

    def __init__(self,
                 reader: SICDTypeReader,
                 dimension: int = 1,
                 index: int = 0,
                 apply_deskew: bool = True,
                 apply_deweighting: bool = False,
                 apply_off_axis: bool = True):
        """
        Parameters
        ----------
        reader : SICDTypeReader
        dimension : int
            The dimension in `{0, 1}` along which to deskew. `0` is row/range/fast-time,
            and `1` is column/azimuth/slow-time.
        index : int
            The reader index to utilize
        apply_deskew : bool
            Deskew along the given axis?
        apply_deweighting : bool
            Deweight?
        apply_off_axis : bool
            Deskew off axis, to the extent possible?
        """
        self._apply_deskew = apply_deskew
        self._apply_deweighting = apply_deweighting
        self._apply_off_axis = apply_off_axis
        # the state below is derived from the sicd structure in _set_sicd
        self._delta_kcoa_poly_axis = None
        self._delta_kcoa_poly_off_axis = None
        self._row_fft_sgn = None
        self._row_shift = None
        self._row_mult = None
        self._col_fft_sgn = None
        self._col_shift = None
        self._col_mult = None
        self._row_weight = None
        self._row_pad = None
        self._col_weight = None
        self._col_pad = None
        self._is_normalized = None
        self._is_not_skewed_row = None
        self._is_not_skewed_col = None
        self._is_uniform_weight_row = None
        self._is_uniform_weight_col = None
        super(DeskewCalculator, self).__init__(
            reader, dimension=dimension, index=index, block_size=None)

    @property
    def dimension(self) -> int:
        """
        int: The dimension along which to perform the color subaperture split.
        """
        return self._dimension

    @dimension.setter
    def dimension(self, value) -> None:
        value = int(value)
        if value not in [0, 1]:
            raise ValueError('dimension must be 0 or 1, got {}'.format(value))
        self._dimension = value
        if self._sicd is not None:
            # changing dimension changes the derived deskew parameters
            self._set_sicd(self._sicd)

    def _set_index(self, value) -> None:
        # Set the reader index, and re-derive sicd-dependent state.
        value = int(value)
        if value < 0:
            raise ValueError('The index must be a non-negative integer, got {}'.format(value))
        # noinspection PyUnresolvedReferences
        sicds = self.reader.get_sicds_as_tuple()
        if value >= len(sicds):
            raise ValueError('The index must be less than the sicd count.')
        self._index = value
        self._set_sicd(sicds[value])
        self._data_size = self.reader.get_data_size_as_tuple()[value]

    def _set_sicd(self, the_sicd: SICDType) -> None:
        # Derive all deskew/deweight parameters from the given sicd structure.
        if the_sicd is None:
            self._sicd = None
            return

        if not isinstance(the_sicd, SICDType):
            raise TypeError('the_sicd must be an insatnce of SICDType, got type {}'.format(type(the_sicd)))

        self._sicd = the_sicd
        row_delta_kcoa_poly, self._row_fft_sgn = _get_deskew_params(the_sicd, 0)
        col_delta_kcoa_poly, self._col_fft_sgn = _get_deskew_params(the_sicd, 1)
        if self.dimension == 0:
            self._delta_kcoa_poly_axis = row_delta_kcoa_poly
            # residual polynomial for the other dimension, after on-axis deskew
            delta_kcoa_poly_int = polynomial.polyint(row_delta_kcoa_poly, axis=0)
            self._delta_kcoa_poly_off_axis = _add_poly(-polynomial.polyder(delta_kcoa_poly_int, axis=1),
                                                       col_delta_kcoa_poly)
        else:
            self._delta_kcoa_poly_axis = col_delta_kcoa_poly
            # residual polynomial for the other dimension, after on-axis deskew
            delta_kcoa_poly_int = polynomial.polyint(col_delta_kcoa_poly, axis=1)
            self._delta_kcoa_poly_off_axis = _add_poly(-polynomial.polyder(delta_kcoa_poly_int, axis=0),
                                                       row_delta_kcoa_poly)

        # pixel index -> physical coordinate conversion parameters
        self._row_shift = the_sicd.ImageData.SCPPixel.Row - the_sicd.ImageData.FirstRow
        self._row_mult = the_sicd.Grid.Row.SS
        self._col_shift = the_sicd.ImageData.SCPPixel.Col - the_sicd.ImageData.FirstCol
        self._col_mult = the_sicd.Grid.Col.SS
        # oversample rates and weight functions for deweighting
        self._row_pad = max(1., 1./(the_sicd.Grid.Row.SS*the_sicd.Grid.Row.ImpRespBW))
        self._row_weight = the_sicd.Grid.Row.WgtFunct.copy() if the_sicd.Grid.Row.WgtFunct is not None else None
        self._col_pad = max(1., 1./(the_sicd.Grid.Col.SS*the_sicd.Grid.Col.ImpRespBW))
        self._col_weight = the_sicd.Grid.Col.WgtFunct.copy() if the_sicd.Grid.Col.WgtFunct is not None else None
        # cached state checks
        self._is_normalized = is_normalized(the_sicd, self.dimension)
        self._is_not_skewed_row = is_not_skewed(the_sicd, 0)
        self._is_not_skewed_col = is_not_skewed(the_sicd, 1)
        self._is_uniform_weight_row = is_uniform_weight(the_sicd, 0)
        self._is_uniform_weight_col = is_uniform_weight(the_sicd, 1)

    @property
    def apply_deskew(self) -> bool:
        """
        bool: Apply deskew to calculated value. This is for API completeness.
        """
        return self._apply_deskew

    @apply_deskew.setter
    def apply_deskew(self, value):
        self._apply_deskew = (value is True)

    @property
    def apply_deweighting(self) -> bool:
        """
        bool: Apply deweighting to calculated values.
        """
        return self._apply_deweighting

    @apply_deweighting.setter
    def apply_deweighting(self, value):
        self._apply_deweighting = (value is True)

    def _get_index_arrays(
            self,
            row_range: Tuple[int, Union[int, None]],
            row_step: int,
            col_range: Tuple[int, Union[int, None]],
            col_step: int) -> Tuple[numpy.ndarray, numpy.ndarray]:
        """
        Get index array data for polynomial evaluation.

        Parameters
        ----------
        row_range : tuple
        row_step : int
        col_range : tuple
        col_step : int

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
        """
        # NB: a stop value of None maps to -1, for slicing to the array start
        row_array = self._row_mult*(numpy.arange(row_range[0], -1 if row_range[1] is None else row_range[1], row_step) - self._row_shift)
        col_array = self._col_mult*(numpy.arange(col_range[0], -1 if col_range[1] is None else col_range[1], col_step) - self._col_shift)
        return row_array, col_array

    def __getitem__(self, item) -> numpy.ndarray:
        """
        Fetches the processed data based on the input slice.

        Parameters
        ----------
        item

        Returns
        -------
        numpy.ndarray
        """

        def on_axis_deskew(t_full_data, fft_sgn):
            return apply_skew_poly(
                t_full_data, self._delta_kcoa_poly_axis, row_array, col_array,
                fft_sgn, self.dimension, forward=False)

        def other_axis_deskew(t_full_data, fft_sgn):
            # We cannot generally deskew in both directions at once, but we
            # can recenter the nonskewed dimension with a uniform shift
            if numpy.any(self._delta_kcoa_poly_off_axis != 0):
                # get deltakcoa at midpoint, and treat as a constant polynomial
                row_mid = row_array[int(round(0.5 * row_array.size)) - 1]
                col_mid = col_array[int(round(0.5 * col_array.size)) - 1]
                delta_kcoa_new_const = numpy.zeros((1, 1), dtype='float64')
                delta_kcoa_new_const[0, 0] = polynomial.polyval2d(
                    row_mid, col_mid, self._delta_kcoa_poly_off_axis)
                # apply this uniform shift
                t_full_data = apply_skew_poly(
                    t_full_data, delta_kcoa_new_const, row_array, col_array,
                    fft_sgn, 1-self.dimension, forward=False)
            return t_full_data

        if self._is_normalized or not self.apply_deskew:
            # just fetch the data and return
            if not isinstance(item, tuple) or len(item) != 2:
                raise KeyError(
                    'Slicing in the deskew calculator must be two dimensional. '
                    'Got slice item {}'.format(item))
            return self.reader.__getitem__((item[0], item[1], self.index))

        # parse the slicing to ensure consistent structure
        row_range, col_range, _ = self._parse_slicing(item)
        # get full resolution data in both directions
        # (decimation is deferred until after the deskew processing)
        row_step = 1 if row_range.step > 0 else -1
        col_step = 1 if col_range.step > 0 else -1
        full_data = self.reader[
            row_range.start:row_range.stop:row_step,
            col_range.start:col_range.stop:col_step,
            self.index]
        # de-weight in each applicable direction
        if self._apply_deweighting and self._is_not_skewed_row and not self._is_uniform_weight_row:
            full_data = apply_weight_array(full_data, self._row_weight, self._row_pad, 0, inverse=True)
        if self._apply_deweighting and self._is_not_skewed_col and not self._is_uniform_weight_col:
            full_data = apply_weight_array(full_data, self._col_weight, self._col_pad, 1, inverse=True)
        # deskew in our given dimension
        row_array, col_array = self._get_index_arrays(
            (row_range.start, row_range.stop), row_step,
            (col_range.start, col_range.stop), col_step)
        if self.dimension == 0:
            # deskew on axis, if necessary
            if not self._is_not_skewed_row:
                full_data = on_axis_deskew(full_data, self._row_fft_sgn)
                if self._apply_deweighting:
                    full_data = apply_weight_array(full_data, self._row_weight, self._row_pad, 0, inverse=True)
            if self._apply_off_axis:
                # deskew off axis, to the extent possible
                full_data = other_axis_deskew(full_data, self._col_fft_sgn)
        elif self.dimension == 1:
            # deskew on axis, if necessary
            if not self._is_not_skewed_col:
                full_data = on_axis_deskew(full_data, self._col_fft_sgn)
                if self._apply_deweighting:
                    full_data = apply_weight_array(full_data, self._col_weight, self._col_pad, 1, inverse=True)
            if self._apply_off_axis:
                # deskew off axis, to the extent possible
                full_data = other_axis_deskew(full_data, self._row_fft_sgn)
        # apply the originally requested decimation
        return full_data[::abs(row_range.step), ::abs(col_range.step)]
def aperture_dimension_limits(
        sicd: SICDType,
        dimension: int,
        dimension_limits: Optional[Tuple[Union[int, float], Union[int, float]]] = None,
        aperture_limits: Optional[Tuple[Union[int, float], Union[int, float]]] = None) -> Tuple[Tuple[int, int], Tuple[int, int]]:
    """
    This is a helper method to determine the "correct" effective limits for aperture
    processing along the given dimension, considering the ImpRespBW values.

    Parameters
    ----------
    sicd : SICDType
    dimension : int
        One of `{0, 1}`, for the processing dimension
    dimension_limits : None|Tuple[int|float, int|float]
        The base limits along the given dimension, will default to `(0, rows)` for `dimension=0`
        or `(0, columns)` for `dimension=1`, if not provided.
    aperture_limits : None|Tuple[int|float, int|float]
        The desired aperture limits, relative to `dimension_limits`.

    Returns
    -------
    dimension_limits : Tuple[int, int]
        The explicitly populated effective limits along the given dimension.
    aperture_limits : Tuple[int, int]
        The valid aperture limits, relative to `dimension_limits`, after considerations
        of the impulse response bandwidth along the dimension.
    """

    def as_int_bounds(tup: Optional[tuple], limit: int) -> Tuple[int, int]:
        # normalize to an increasing integer pair inside [0, limit]
        if tup is None:
            return 0, limit
        bounds = int(numpy.floor(tup[0])), int(numpy.ceil(tup[1]))
        if not (0 <= bounds[0] < bounds[1] <= limit):
            raise ValueError('Got invalid tuple `{}` for limit `{}`'.format(tup, limit))
        return bounds

    dimension = int(dimension)
    if dimension not in [0, 1]:
        raise ValueError('Got invalid dimension value')

    if dimension == 0:
        full_size = sicd.ImageData.NumRows
        oversample = sicd.Grid.Row.get_oversample_rate()
    else:
        full_size = sicd.ImageData.NumCols
        oversample = sicd.Grid.Col.get_oversample_rate()

    dimension_limits = as_int_bounds(dimension_limits, full_size)
    dimension_count = dimension_limits[1] - dimension_limits[0]
    requested = as_int_bounds(aperture_limits, dimension_count)

    # the nominal aperture support, centered, considering oversample
    nominal_size = dimension_count/oversample
    nominal = (0.5*(dimension_count - nominal_size), 0.5*(dimension_count + nominal_size))
    # intersect the request with the nominal support
    ap_limits = (
        int(numpy.floor(max(requested[0], nominal[0]))),
        int(numpy.ceil(min(requested[1], nominal[1]))))
    return dimension_limits, ap_limits
def aperture_dimension_params(
        sicd: SICDType,
        dimension: int,
        dimension_limits: Optional[Tuple[Union[int, float], Union[int, float]]] = None,
        aperture_limits: Optional[Tuple[int, int]] = None,
        new_weight_function: Optional[numpy.ndarray] = None):
    """
    Gets the aperture processing parameters along the given dimension.

    Parameters
    ----------
    sicd : SICDType
    dimension : int
        One of `{0, 1}`, for the processing dimension
    dimension_limits : None|tuple[int|float, int|float]
        The base limits along the given dimension, will default to `(0, rows)`
        for `dimension=0` or `(0, columns)` for `dimension=1`, if not provided.
    aperture_limits : tuple[int, int]
        The valid aperture limits, relative to `dimension_limits`, after
        considerations of the impulse response bandwidth along the dimension.
    new_weight_function : None|numpy.ndarray
        The new weight function. This will default to the current weight function
        if not provided.

    Returns
    -------
    dimension_limits : tuple[int, int]
        The explicitly populated effective limits along the given dimension.
    cur_aperture_limits : tuple[int, int]
        The current valid aperture limits, relative to `dimension_limits`, after
        considerations of the impulse response bandwidth along the dimension.
    cur_aperture_weighting : numpy.ndarray
    new_aperture_limits : tuple[int, int]
        The new valid aperture limits, relative to `dimension_limits`, after
        considerations of the impulse response bandwidth along the dimension.
    new_aperture_weighting : numpy.ndarray
    """

    dimension = int(dimension)
    if dimension not in [0, 1]:
        raise ValueError('Got invalid dimension value')

    # current (full) and requested (possibly reduced) aperture extents
    dimension_limits, cur_aperture_limits = aperture_dimension_limits(
        sicd, dimension, dimension_limits, None)
    _, new_aperture_limits = aperture_dimension_limits(
        sicd, dimension, dimension_limits, aperture_limits)
    cur_ap_count = cur_aperture_limits[1] - cur_aperture_limits[0]
    new_ap_count = new_aperture_limits[1] - new_aperture_limits[0]

    # resample the weight functions to the respective aperture sizes
    base_weights = sicd.Grid.Row.WgtFunct if dimension == 0 else sicd.Grid.Col.WgtFunct
    cur_weight_function = resample(base_weights, cur_ap_count)
    weight_source = base_weights if new_weight_function is None else new_weight_function
    new_weight_function = resample(weight_source, new_ap_count)

    return dimension_limits, cur_aperture_limits, cur_weight_function, new_aperture_limits, new_weight_function
def noise_scaling(
        cur_ap_limits: Tuple[int, int],
        cur_weighting: numpy.ndarray,
        new_ap_limits: Tuple[int, int],
        new_weighting: numpy.ndarray) -> float:
    """
    Gets noise scaling due to sub-aperture degradation and re-weighting along one
    dimension.

    Parameters
    ----------
    cur_ap_limits : Tuple[int, int]
    cur_weighting : numpy.ndarray
    new_ap_limits : Tuple[int, int]
    new_weighting : numpy.ndarray

    Returns
    -------
    noise_multiplier : float
    """

    # locate the new aperture within the current weighting array
    offset = new_ap_limits[0] - cur_ap_limits[0]
    stop = offset + (new_ap_limits[1] - new_ap_limits[0])
    # element-wise ratio of new to current weighting over the retained support
    weight_ratio = new_weighting/cur_weighting[offset:stop]
    # normalized second moment of the weighting change
    return numpy.sum(weight_ratio*weight_ratio)/float(cur_weighting.size)
def sicd_degrade_reweight(
        reader: SICDTypeReader,
        output_file: Optional[str] = None,
        index: int = 0,
        row_limits: Optional[Tuple[int, int]] = None,
        column_limits: Optional[Tuple[int, int]] = None,
        row_aperture: Optional[Tuple[int, int]] = None,
        row_weighting: Optional[Dict] = None,
        column_aperture: Optional[Tuple[int, int]] = None,
        column_weighting: Optional[Dict] = None,
        add_noise: Optional[float] = None,
        pixel_threshold: Optional[int] = 1500*1500,
        check_existence: bool = True,
        check_older_version: bool = False,
        repopulate_rniirs: bool = True) -> Optional[FlatSICDReader]:
    r"""
    Given input, create a SICD (file or reader) with modified weighting/subaperture
    parameters. Any additional noise will be added **before** performing any sub-aperture
    degradation processing.

    Recall that reducing the size of the impulse response bandwidth via sub-aperture
    degradation in a single direction by :math:`ratio`, actually decreases the
    magnitude of the noise in pixel power by :math:`ratio`, or subtracts
    :math:`10*\log_{10}(ratio)` from the noise given in dB.

    .. warning::
        To ensure correctness of metadata, if the Noise Polynomial is present,
        then the NoiseLevelType must be `'ABSOLUTE'`. Otherwise an exception
        will be raised.

    Parameters
    ----------
    reader : str|SICDTypeReader
        A sicd type reader.
    output_file : None|str
        If `None`, an in-memory SICD reader instance will be returned. Otherwise,
        this is the path for the produced output SICD file.
    index : int
        The reader index to be used.
    row_limits : None|(int, int)
        Row limits for the underlying data.
    column_limits : None|(int, int)
        Column limits for the underlying data.
    row_aperture : None|tuple
        `None` (no row subaperture), or integer valued `start_row, end_row` for the
        row subaperture definition. This is with respect to row values AFTER
        considering `row_limits`. Note that this will reduce the noise, so the
        noise polynomial (if it is populated) will be modified.
    row_weighting : None|dict
        `None` (no row weighting change), or the new row weighting parameters
        `{'WindowName': <name>, 'Parameters': {}, 'WgtFunct': array}`.
    column_aperture : None|tuple
        `None` (no column subaperture), or integer valued `start_col, end_col` for the
        column sub-aperture definition. This is with respect to row values AFTER
        considering `column_limits`. Note that this will reduce the noise, so the
        noise polynomial (if it is populated) will be modified.
    column_weighting : None|dict
        `None` (no column weighting change), or the new column weighting parameters
        `{'WindowName': <name>, 'Parameters': {}, 'WgtFunct': array}`.
    add_noise : None|float
        If provided, Gaussian white noise of pixel power `add_noise` will be
        added, prior to any subaperture processing. Note that performing subaperture
        processing actually reduces the resulting noise, which will also be considered.
    pixel_threshold : None|int
        Approximate pixel area threshold for performing this directly in memory.
    check_existence : bool
        Should we check if the given file already exists, and raise an exception if so?
    check_older_version : bool
        Try to use a less recent version of SICD (1.1), for possible application compliance issues?
    repopulate_rniirs : bool
        Should we try to repopulate the estimated RNIIRS value?

    Returns
    -------
    None|FlatSICDReader
        No return if `output_file` is provided, otherwise returns the in-memory
        reader object.
    """

    def validate_filename():
        # guard against clobbering an existing output file, when checking is enabled
        if output_file is None:
            return
        if check_existence and os.path.exists(output_file):
            raise SarpyIOError('The file {} already exists.'.format(output_file))

    def validate_sicd(the_sicd):
        # the processing below requires fully populated Grid Row/Col metadata
        if the_sicd.Grid is None or the_sicd.Grid.Row is None or the_sicd.Grid.Col is None:
            raise ValueError('Grid.Row and Grid.Col must be populated')
        for direction in ['Row', 'Col']:
            el = getattr(the_sicd.Grid, direction)
            if el.DeltaKCOAPoly is None or el.WgtFunct is None:
                raise ValueError('DeltaKCOAPoly and WgtFunct must be populated for both Row and Col')

    def validate_limits(lims, max_index):
        # normalize None to the full extent, otherwise require 0 <= start < end <= max
        if lims is None:
            return 0, max_index
        _lims = (int(lims[0]), int(lims[1]))
        if not (0 <= _lims[0] < _lims[1] <= max_index):
            raise ValueError('Got poorly formatted index limit {}'.format(lims))
        return _lims

    def get_iterations(max_index, other_index):
        # partition [0, max_index) into contiguous blocks of roughly
        # pixel_threshold pixels each; a single full-extent block when in memory
        if in_memory:
            return [(0, max_index), ]
        out = []
        block = int(pixel_threshold / float(other_index))
        _start_ind = 0
        while _start_ind < max_index:
            _end_ind = min(_start_ind + block, max_index)
            out.append((_start_ind, _end_ind))
            _start_ind = _end_ind
        return out

    def get_direction_array_meters(dimension, start_index, end_index):
        # coordinate array (in meters, relative to the SCP pixel) along the
        # given dimension for the index range [start_index, end_index)
        if dimension == 0:
            shift = sicd.ImageData.FirstRow - sicd.ImageData.SCPPixel.Row
            multiplier = sicd.Grid.Row.SS
        else:
            shift = sicd.ImageData.FirstCol - sicd.ImageData.SCPPixel.Col
            multiplier = sicd.Grid.Col.SS
        return (numpy.arange(start_index, end_index) + shift)*multiplier

    def do_add_noise():
        # add complex Gaussian white noise of total pixel power `add_noise` to
        # working_data (in place), and bump the noise polynomial constant accordingly
        if add_noise is None:
            return

        # noinspection PyBroadException
        try:
            variance = float(add_noise)
            if variance <= 0:
                logger.error('add_noise was provided as `{}`, but must be a positive number'.format(add_noise))
                return
        except Exception:
            logger.error('add_noise was provided as `{}`, but must be a positive number'.format(add_noise))
            return

        # half the variance in each of the real/imaginary components
        sigma = numpy.sqrt(0.5*variance)
        if noise_level is not None:
            # NoisePoly constant is in dB - convert to power, add, convert back
            noise_constant_power = numpy.exp(numpy.log(10)*0.1*noise_level.NoisePoly[0, 0])
            noise_constant_power += variance
            noise_constant_db = 10*numpy.log10(noise_constant_power)
            noise_level.NoisePoly[0, 0] = noise_constant_db

        for (_start_ind, _stop_ind) in row_iterations:
            # NOTE(review): uses data_shape[1] (the full original column count) rather
            # than out_data_shape[1]; these differ when column_limits trims columns - confirm
            d_shape = (_stop_ind - _start_ind, data_shape[1])
            added_noise = numpy.empty(d_shape, dtype='complex64')
            added_noise[:].real = randn(*d_shape).astype('float32')
            added_noise[:].imag = randn(*d_shape).astype('float32')
            added_noise *= sigma
            working_data[_start_ind:_stop_ind, :] += added_noise

    def do_dimension(dimension):
        # Apply the subaperture/reweighting pipeline along one dimension:
        # deskew -> forward FFT -> deweight -> zero outside new aperture ->
        # reweight -> inverse FFT -> reskew, updating metadata as it goes.
        # Returns the accumulated noise adjustment multiplier.
        if dimension == 0:
            dir_params = sicd.Grid.Row
            aperture_in = row_aperture
            weighting_in = row_weighting
            dimension_limits = row_limits
        else:
            dir_params = sicd.Grid.Col
            aperture_in = column_aperture
            weighting_in = column_weighting
            dimension_limits = column_limits
        not_skewed = is_not_skewed(sicd, dimension)
        uniform_weight = is_uniform_weight(sicd, dimension)
        delta_kcoa = dir_params.DeltaKCOAPoly.get_array(dtype='float64')
        st_beam_comp = sicd.ImageFormation.STBeamComp if sicd.ImageFormation is not None else None

        # column de-weighting fidelity depends on slow-time beam compensation
        if dimension == 1 and (not uniform_weight or weighting_in is not None):
            if st_beam_comp is None:
                logger.warning(
                    'Processing along the column direction requires modification\n\t'
                    'of the original weighting scheme, and the value for\n\t'
                    'ImageFormation.STBeamComp is not populated.\n\t'
                    'It is unclear how imperfect the de-weighting effort along the column may be.')
            elif st_beam_comp == 'NO':
                logger.warning(
                    'Processing along the column direction requires modification\n\t'
                    'of the original weighting scheme, and the value for\n\t'
                    'ImageFormation.STBeamComp is populated as `NO`.\n\t'
                    'It is likely that the de-weighting effort along the column is imperfect.')

        if aperture_in is None and weighting_in is None:
            # nothing to be done in this dimension
            return noise_adjustment_multiplier

        new_weight = None if weighting_in is None else weighting_in['WgtFunct']
        dimension_limits, cur_aperture_limits, cur_weight_function, \
            new_aperture_limits, new_weight_function = aperture_dimension_params(
                old_sicd, dimension, dimension_limits=dimension_limits,
                aperture_limits=aperture_in, new_weight_function=new_weight)
        index_count = dimension_limits[1] - dimension_limits[0]
        cur_center_index = 0.5*(cur_aperture_limits[0] + cur_aperture_limits[1])
        new_center_index = 0.5*(new_aperture_limits[0] + new_aperture_limits[1])
        noise_multiplier = noise_scaling(
            cur_aperture_limits, cur_weight_function, new_aperture_limits, new_weight_function)

        # perform deskew, if necessary
        if not not_skewed:
            if dimension == 0:
                row_array = get_direction_array_meters(0, 0, out_data_shape[0])
                for (_start_ind, _stop_ind) in col_iterations:
                    col_array = get_direction_array_meters(1, _start_ind, _stop_ind)
                    working_data[:, _start_ind:_stop_ind] = apply_skew_poly(
                        working_data[:, _start_ind:_stop_ind], delta_kcoa,
                        row_array, col_array, dir_params.Sgn, 0, forward=False)
            else:
                col_array = get_direction_array_meters(1, 0, out_data_shape[1])
                for (_start_ind, _stop_ind) in row_iterations:
                    row_array = get_direction_array_meters(0, _start_ind, _stop_ind)
                    working_data[_start_ind:_stop_ind, :] = apply_skew_poly(
                        working_data[_start_ind:_stop_ind, :], delta_kcoa,
                        row_array, col_array, dir_params.Sgn, 1, forward=False)

        # perform fourier transform along the given dimension
        if dimension == 0:
            for (_start_ind, _stop_ind) in col_iterations:
                working_data[:, _start_ind:_stop_ind] = fftshift(
                    fft_sicd(working_data[:, _start_ind:_stop_ind], dimension, sicd), axes=dimension)
        else:
            for (_start_ind, _stop_ind) in row_iterations:
                working_data[_start_ind:_stop_ind, :] = fftshift(
                    fft_sicd(working_data[_start_ind:_stop_ind, :], dimension, sicd), axes=dimension)

        # perform deweight, if necessary
        if not uniform_weight:
            if dimension == 0:
                working_data[cur_aperture_limits[0]:cur_aperture_limits[1], :] /= cur_weight_function[:, numpy.newaxis]
            else:
                working_data[:, cur_aperture_limits[0]:cur_aperture_limits[1]] /= cur_weight_function

        # do sub-aperture, if necessary
        if aperture_in is not None:
            # zero out everything outside the new aperture limits
            if dimension == 0:
                working_data[:new_aperture_limits[0], :] = 0
                working_data[new_aperture_limits[1]:, :] = 0
            else:
                working_data[:, :new_aperture_limits[0]] = 0
                working_data[:, new_aperture_limits[1]:] = 0

            the_ratio = float(new_aperture_limits[1] - new_aperture_limits[0]) / \
                float(cur_aperture_limits[1] - cur_aperture_limits[0])
            # modify the ImpRespBW value (derived ImpRespWid handled at the end)
            dir_params.ImpRespBW *= the_ratio

        # perform reweight, if necessary
        if weighting_in is not None:
            if dimension == 0:
                working_data[new_aperture_limits[0]:new_aperture_limits[1], :] *= new_weight_function[:, numpy.newaxis]
            else:
                working_data[:, new_aperture_limits[0]:new_aperture_limits[1]] *= new_weight_function
            # modify the weight definition
            dir_params.WgtType = WgtTypeType(
                WindowName=weighting_in['WindowName'],
                Parameters=weighting_in.get('Parameters', None))
            dir_params.WgtFunct = weighting_in['WgtFunct'].copy()
        elif not uniform_weight:
            # no weighting change requested - restore the original (resampled) weighting
            if dimension == 0:
                working_data[new_aperture_limits[0]:new_aperture_limits[1], :] *= new_weight_function[:, numpy.newaxis]
            else:
                working_data[:, new_aperture_limits[0]:new_aperture_limits[1]] *= new_weight_function

        # perform inverse fourier transform along the given dimension
        if dimension == 0:
            for (_start_ind, _stop_ind) in col_iterations:
                working_data[:, _start_ind:_stop_ind] = ifft_sicd(
                    ifftshift(working_data[:, _start_ind:_stop_ind], axes=dimension), dimension, sicd)
        else:
            for (_start_ind, _stop_ind) in row_iterations:
                working_data[_start_ind:_stop_ind, :] = ifft_sicd(
                    ifftshift(working_data[_start_ind:_stop_ind, :], axes=dimension), dimension, sicd)

        # perform the (original) reskew, if necessary
        if not numpy.all(delta_kcoa == 0):
            if dimension == 0:
                row_array = get_direction_array_meters(0, 0, out_data_shape[0])
                for (_start_ind, _stop_ind) in col_iterations:
                    col_array = get_direction_array_meters(1, _start_ind, _stop_ind)
                    working_data[:, _start_ind:_stop_ind] = apply_skew_poly(
                        working_data[:, _start_ind:_stop_ind], delta_kcoa,
                        row_array, col_array, dir_params.Sgn, 0, forward=True)
            else:
                col_array = get_direction_array_meters(1, 0, out_data_shape[1])
                for (_start_ind, _stop_ind) in row_iterations:
                    row_array = get_direction_array_meters(0, _start_ind, _stop_ind)
                    working_data[_start_ind:_stop_ind, :] = apply_skew_poly(
                        working_data[_start_ind:_stop_ind, :], delta_kcoa,
                        row_array, col_array, dir_params.Sgn, 1, forward=True)

        # modify the delta_kcoa_poly - introduce the shift necessary for additional offset
        if new_center_index != cur_center_index:
            additional_shift = dir_params.Sgn*(cur_center_index - new_center_index)/float(index_count*dir_params.SS)
            delta_kcoa[0, 0] += additional_shift
            dir_params.DeltaKCOAPoly = delta_kcoa

        return noise_adjustment_multiplier*noise_multiplier

    if isinstance(reader, str):
        reader = open_complex(reader)
    if not isinstance(reader, SICDTypeReader):
        raise TypeError('reader must be sicd type reader, got {}'.format(reader))

    # noinspection PyUnresolvedReferences
    old_sicd = reader.get_sicds_as_tuple()[index]
    validate_sicd(old_sicd)
    validate_filename()
    data_shape = reader.get_data_size_as_tuple()[index]
    redo_geo = (row_limits is not None or column_limits is not None)
    row_limits = validate_limits(row_limits, data_shape[0])
    column_limits = validate_limits(column_limits, data_shape[1])

    # prepare our working sicd structure
    sicd = old_sicd.copy()  # type: SICDType
    noise_level = None if sicd.Radiometric is None else sicd.Radiometric.NoiseLevel
    # NB: as an alternative, we could drop the noise polynomial?
    if add_noise is not None and \
            noise_level is not None and \
            noise_level.NoiseLevelType != 'ABSOLUTE':
        raise ValueError(
            'add_noise is provided, but the radiometric noise is populated,\n\t'
            'with `NoiseLevelType={}`'.format(noise_level.NoiseLevelType))

    # update the image geometry for the requested sub-image limits
    sicd.ImageData.FirstRow += row_limits[0]
    sicd.ImageData.NumRows = row_limits[1] - row_limits[0]
    sicd.ImageData.FirstCol += column_limits[0]
    sicd.ImageData.NumCols = column_limits[1] - column_limits[0]
    if redo_geo:
        sicd.define_geo_image_corners(override=True)

    out_data_shape = (sicd.ImageData.NumRows, sicd.ImageData.NumCols)
    pixel_area = out_data_shape[0]*out_data_shape[1]
    temp_file = None
    # work in memory for small output, otherwise spill to a memmap-backed temp file
    in_memory = True if (output_file is None or pixel_threshold is None) else (pixel_area < pixel_threshold)
    row_iterations = get_iterations(out_data_shape[0], out_data_shape[1])
    col_iterations = get_iterations(out_data_shape[1], out_data_shape[0])
    if in_memory:
        working_data = reader[row_limits[0]:row_limits[1], column_limits[0]:column_limits[1], index]
    else:
        _, temp_file = mkstemp(suffix='.sarpy.cache', text=False)
        working_data = numpy.memmap(temp_file, dtype='complex64', mode='r+', offset=0, shape=out_data_shape)
        for (start_ind, stop_ind) in row_iterations:
            working_data[start_ind:stop_ind, :] = reader[
                start_ind+row_limits[0]:stop_ind+row_limits[0],
                column_limits[0]:column_limits[1], index]

    noise_adjustment_multiplier = 1.0
    # NB: I'm adding Gaussian white noise first
    do_add_noise()
    # do, as necessary, along the row
    noise_adjustment_multiplier = do_dimension(0)
    # do, as necessary, along the column
    noise_adjustment_multiplier = do_dimension(1)

    # re-derive the various ImpResp parameters
    sicd.Grid.derive_direction_params(sicd.ImageData, populate=True)
    # adjust the noise polynomial
    if noise_level is not None:
        noise_level.NoisePoly[0, 0] += 10*numpy.log10(noise_adjustment_multiplier)
    if sicd.Radiometric is not None and sicd.Radiometric.RCSSFPoly is not None:
        # NOTE(review): scales RCSSFPoly by slant-plane area over the noise multiplier,
        # and the other SF polys by the inverse noise multiplier - confirm normalization
        sf_adjust = old_sicd.Grid.get_slant_plane_area()/noise_adjustment_multiplier
        sicd.Radiometric.RCSSFPoly.Coefs = sicd.Radiometric.RCSSFPoly.get_array()*sf_adjust
        sicd.Radiometric.BetaZeroSFPoly.Coefs = sicd.Radiometric.BetaZeroSFPoly.get_array() / \
            noise_adjustment_multiplier
        sicd.Radiometric.SigmaZeroSFPoly.Coefs = sicd.Radiometric.SigmaZeroSFPoly.get_array() / \
            noise_adjustment_multiplier
        sicd.Radiometric.GammaZeroSFPoly.Coefs = sicd.Radiometric.GammaZeroSFPoly.get_array() / \
            noise_adjustment_multiplier
    if sicd.RMA is not None and sicd.RMA.INCA is not None and sicd.RMA.INCA.TimeCAPoly is not None:
        # redefine the INCA doppler centroid poly to be in keeping with any redefinition of our Col.DeltaKCOAPoly?
        sicd.RMA.INCA.DopCentroidPoly = sicd.Grid.Col.DeltaKCOAPoly.get_array(dtype='float64') / \
            sicd.RMA.INCA.TimeCAPoly[1]
    if repopulate_rniirs:
        sicd.populate_rniirs(override=True)

    if output_file is None:
        return FlatSICDReader(sicd, working_data)
    else:
        # write out the new sicd file
        with SICDWriter(
                output_file, sicd,
                check_older_version=check_older_version, check_existence=check_existence) as writer:
            for (start_ind, stop_ind) in row_iterations:
                writer.write_chip(working_data[start_ind:stop_ind, :], start_indices=(start_ind, 0))
        # clean up the temporary memmap backing file, if one was created
        if temp_file is not None and os.path.exists(temp_file):
            working_data = None
            os.remove(temp_file)
| 47,145 | 39.819048 | 137 | py |
sarpy | sarpy-master/sarpy/processing/sicd/rgiqe.py | """
Radar Generalized Image Quality Equation (RGIQE) calculation(s) and tools for
application to SICD structures and files.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import Union, Tuple, Dict, Optional
import numpy
from sarpy.io.complex.base import SICDTypeReader, FlatSICDReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.processing.sicd.windows import get_hamming_broadening_factor
from sarpy.processing.sicd.normalize_sicd import sicd_degrade_reweight, is_uniform_weight
from sarpy.io.complex.converter import open_complex
logger = logging.getLogger(__name__)
RNIIRS_FIT_PARAMETERS = numpy.array([3.4761, 0.4357], dtype='float64')
"""
The RNIIRS calculation parameters determined by empirical data fit,
parameters updated on 2022-02-01
"""
#####################
# methods for extracting necessary information from the sicd structure
def _verify_sicd_with_noise(sicd: SICDType) -> None:
"""
Verify that the sicd is appropriately populated with noise.
Parameters
----------
sicd : SICDType
"""
if sicd.Radiometric is None:
raise ValueError(
'Radiometric is not populated,\n\t'
'so no noise estimate can be derived.')
if sicd.Radiometric.SigmaZeroSFPoly is None:
raise ValueError(
'Radiometric.SigmaZeroSFPoly is not populated,\n\t'
'so no sigma0 noise estimate can be derived.')
if sicd.Radiometric.NoiseLevel is None:
raise ValueError(
'Radiometric.NoiseLevel is not populated,\n\t'
'so no noise estimate can be derived.')
if sicd.Radiometric.NoiseLevel.NoiseLevelType != 'ABSOLUTE':
raise ValueError(
'Radiometric.NoiseLevel.NoiseLevelType is not `ABSOLUTE``,\n\t'
'so no noise estimate can be derived.')
def get_sigma0_noise(sicd: SICDType) -> float:
    """
    Calculate the absolute noise estimate, in sigma0 power units.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    float
    """

    _verify_sicd_with_noise(sicd)
    # constant term of the noise polynomial is in dB - convert to absolute power
    noise_db = sicd.Radiometric.NoiseLevel.NoisePoly[0, 0]
    noise_power = numpy.exp(numpy.log(10)*0.1*noise_db)
    # scale into SigmaZero units
    return noise_power*sicd.Radiometric.SigmaZeroSFPoly[0, 0]
def get_default_signal_estimate(sicd: SICDType) -> float:
    """
    Gets default signal for use in the RNIIRS calculation. This will be
    1.0 for copolar (or unknown) collections, and 0.25 for cross-pole
    collections.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    float
    """

    image_formation = sicd.ImageFormation
    if image_formation is None:
        return 1.0
    pol = image_formation.TxRcvPolarizationProc
    # unknown or malformed polarization is treated as co-polar
    if pol is None or ':' not in pol:
        return 1.0
    parts = pol.split(':')
    # co-polar -> 1.0, cross-polar -> 0.25
    return 1.0 if parts[0] == parts[1] else 0.25
def get_bandwidth_area(sicd: SICDType) -> float:
    """
    Calculate the bandwidth area - the product of row and column impulse
    response bandwidths, projected through the slope angle.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    float
    """

    slope_angle = numpy.deg2rad(sicd.SCPCOA.SlopeAng)
    area = sicd.Grid.Row.ImpRespBW*sicd.Grid.Col.ImpRespBW*numpy.cos(slope_angle)
    return abs(area)
#########################
# methods for calculating information density and rniirs
def get_information_density(
        bandwidth_area: Union[float, numpy.ndarray],
        signal: Union[float, numpy.ndarray],
        noise: Union[float, numpy.ndarray]) -> Union[float, numpy.ndarray]:
    """
    Calculate the information density from bandwidth area and signal/noise estimates.

    Parameters
    ----------
    bandwidth_area : float|numpy.ndarray
    signal : float|numpy.ndarray
    noise : float|numpy.ndarray

    Returns
    -------
    float|numpy.ndarray
    """

    # Shannon-Hartley channel capacity
    snr = signal/noise
    return bandwidth_area*numpy.log2(1 + snr)
def get_rniirs(
        information_density: Union[float, numpy.ndarray]) -> Union[float, numpy.ndarray]:
    r"""
    Calculate an RNIIRS estimate from the information density or
    Shannon-Hartley channel capacity.

    This mapping has been empirically determined by fitting Shannon-Hartley channel
    capacity to RNIIRS for some sample images.

    The basic model is given by
    :math:`rniirs = a_0 + a_1*\log_2(information\_density)`

    To maintain positivity of the estimated rniirs, this transitions to a linear
    model :math:`rniirs = slope*information\_density` with slope given by
    :math:`slope = a_1/(iim\_transition*\log(2))` below the transition point at
    :math:`transition = \exp(1 - \log(2)*a_0/a_1)`.

    Parameters
    ----------
    information_density : float|numpy.ndarray

    Returns
    -------
    float|numpy.ndarray
    """

    a0, a1 = RNIIRS_FIT_PARAMETERS
    iim_transition = numpy.exp(1 - numpy.log(2)*a0/a1)
    slope = a1/(iim_transition*numpy.log(2))

    values = information_density if isinstance(information_density, numpy.ndarray) \
        else numpy.array(information_density, dtype='float64')
    scalar_input = (values.ndim == 0)
    values = numpy.atleast_1d(values)

    result = numpy.empty(values.shape, dtype='float64')
    # logarithmic model above the transition, linear model below
    log_region = (values > iim_transition)
    result[log_region] = a0 + a1*numpy.log2(values[log_region])
    result[~log_region] = slope*values[~log_region]
    if scalar_input:
        return float(result[0])
    return result
def get_information_density_for_rniirs(
        rniirs: Union[float, numpy.ndarray]) -> Union[float, numpy.ndarray]:
    """
    The inverse of :func:`get_rniirs`, this determines the information density
    which yields the given RNIIRS.

    *New in version 1.2.35.*

    Parameters
    ----------
    rniirs : float|numpy.ndarray

    Returns
    -------
    float|numpy.ndarray
    """

    a0, a1 = RNIIRS_FIT_PARAMETERS
    iim_transition = numpy.exp(1 - numpy.log(2)*a0/a1)
    slope = a1/(iim_transition*numpy.log(2))
    rniirs_transition = slope*iim_transition

    values = rniirs if isinstance(rniirs, numpy.ndarray) \
        else numpy.array(rniirs, dtype='float64')
    scalar_input = (values.ndim == 0)
    values = numpy.atleast_1d(values)

    result = numpy.empty(values.shape, dtype='float64')
    # invert the logarithmic model above the transition, the linear model below
    log_region = (values > rniirs_transition)
    result[log_region] = numpy.exp2((values[log_region] - a0)/a1)
    result[~log_region] = values[~log_region]/slope
    if scalar_input:
        return float(result[0])
    return result
def snr_to_rniirs(
        bandwidth_area: Union[float, numpy.ndarray],
        signal: Union[float, numpy.ndarray],
        noise: Union[float, numpy.ndarray]) -> Tuple[Union[float, numpy.ndarray], Union[float, numpy.ndarray]]:
    """
    Calculate the information_density and RNIIRS estimate from bandwidth area and
    signal/noise estimates.

    It is assumed that geometric effects for signal and noise have been accounted for
    (i.e. use SigmaZeroSFPoly), and signal and noise have each been averaged to a
    single pixel value.

    This mapping has been empirically determined by fitting Shannon-Hartley channel
    capacity to RNIIRS for some sample images.

    Parameters
    ----------
    bandwidth_area : float
    signal : float
    noise : float

    Returns
    -------
    information_density : float
    rniirs : float
    """

    info_density = get_information_density(bandwidth_area, signal, noise)
    return info_density, get_rniirs(info_density)
def rgiqe(sicd: SICDType) -> Tuple[float, float]:
    """
    Calculate the information_density and (default) estimated RNIIRS for the
    given sicd.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    information_density : float
    rniirs : float
    """

    return snr_to_rniirs(
        get_bandwidth_area(sicd),
        get_default_signal_estimate(sicd),
        get_sigma0_noise(sicd))
def populate_rniirs_for_sicd(
        sicd: SICDType,
        signal: Optional[float] = None,
        noise: Optional[float] = None,
        override: bool = False) -> None:
    """
    This populates the value(s) for RNIIRS and information density in the SICD
    structure, according to the RGIQE. **This modifies the sicd structure in place.**

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
    signal : None|float
        The signal value, in sigma zero.
    noise : None|float
        The noise equivalent sigma zero value.
    override : bool
        Override the value, if present.
    """

    if sicd.CollectionInfo is None:
        logger.error(
            'CollectionInfo must not be None.\n\t'
            'Nothing to be done for calculating RNIIRS.')
        return

    params = sicd.CollectionInfo.Parameters
    if params is not None and params.get('PREDICTED_RNIIRS', None) is not None:
        if not override:
            logger.info('PREDICTED_RNIIRS already populated. Nothing to be done.')
            return
        logger.info('PREDICTED_RNIIRS already populated, and this value will be overridden.')

    if noise is None:
        try:
            noise = get_sigma0_noise(sicd)
        except Exception as e:
            logger.error(
                'Encountered an error estimating noise for RNIIRS.\n\t{}'.format(e))
            return

    if signal is None:
        signal = get_default_signal_estimate(sicd)

    try:
        bandwidth_area = get_bandwidth_area(sicd)
    except Exception as e:
        logger.error(
            'Encountered an error estimating bandwidth area for RNIIRS\n\t{}'.format(e))
        return

    info_density, rniirs = snr_to_rniirs(bandwidth_area, signal, noise)
    logger.info(
        'Calculated INFORMATION_DENSITY = {0:0.5G},\n\t'
        'PREDICTED_RNIIRS = {1:0.5G}'.format(info_density, rniirs))
    if sicd.CollectionInfo.Parameters is None:
        sicd.CollectionInfo.Parameters = {}  # initialize
    sicd.CollectionInfo.Parameters['INFORMATION_DENSITY'] = '{0:0.2G}'.format(info_density)
    sicd.CollectionInfo.Parameters['PREDICTED_RNIIRS'] = '{0:0.1f}'.format(rniirs)
def get_bandwidth_noise_distribution(
        sicd: SICDType,
        alpha: Union[float, numpy.ndarray],
        desired_information_density: Optional[float] = None,
        desired_rniirs: Optional[float] = None
) -> Tuple[Union[Tuple[float, float], numpy.ndarray], Union[float, numpy.ndarray]]:
    r"""
    This function determines SICD degradation parameters (nominally symmetric in
    row/column subaperture degradation) to achieve the desired information density/rniirs.

    There is natural one parameter distribution of reducing bandwidth and adding noise
    to achieve the desired RNIIRS/information density, based on :math:`\alpha \in [0, 1]`.

    The nominal relation is

    .. math::

        desired\_information\_density = bandwidth\_area*(bw\_mult(\alpha))^2)\cdot\log_2\left(
            1 + signal/(noise*noise\_mult(\alpha))\right).

    For :math:`\alpha=0`, we add no additional noise (:math:`bw\_mult(0) = bw\_min,noise\_mult(0)=1`)
    and use purely sub-aperture degradation, achieved at

    .. math::

        desired\_information\_density = bandwidth\_area*(bw\_min)^2)\cdot\log_2(1 + snr).

    On the other end, at :math:`\alpha=1`, we have :math:`bw\_mult(1)=1, noise\_mult(1)=noise\_mult\_max`
    and derive the noise multiplier which fulfills the required information density.

    For intermediate :math:`0 < \alpha < 1`, we find

    .. math::

        bw\_mult(\alpha) = bw\_min\cdot (1-\alpha)

    Then, we find :math:`noise\_multiplier(\alpha)` which fulfills the required
    information density.

    .. note::

        Choosing subaperture windows is fundamentally a discrete operation, and
        this carries over to the realities of choosing bandwidth multipliers. See
        :func:`get_bidirectional_bandwidth_multiplier_possibilities` for all
        feasible values for bandwidth multipliers and associated noise adjustment
        details.

    *Refined in version 1.2.35.*

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
    alpha : float|numpy.ndarray
    desired_information_density : None|float
    desired_rniirs : None|float

    Returns
    -------
    bandwidth_multiplier : (float, float)|numpy.ndarray
        The `(row, column)` bandwidth multiplier including the discrete nature
        of this aperture, so the two may not be precisely equal.
    noise_multiplier : float|numpy.ndarray
        The noise multiplier, indicating how much noise to add before the
        subaperture processing.

    Raises
    ------
    ValueError
        If neither or both of `desired_information_density`/`desired_rniirs` are
        given, if `alpha` falls outside of :math:`[0, 1]`, or if the desired value
        is infeasible for this sicd.
    """

    # validate the desired information density/rniirs - exactly one must be given
    if (desired_information_density is None and desired_rniirs is None) or \
            (desired_information_density is not None and desired_rniirs is not None):
        raise ValueError('Exactly one of desired_information_density and desired_rniirs must be provided')

    if not isinstance(alpha, numpy.ndarray):
        alpha = numpy.array(alpha, dtype='float64')
    orig_ndim = alpha.ndim
    if orig_ndim == 0:
        alpha = numpy.reshape(alpha, (1, ))
    if not numpy.all((alpha >= 0) & (alpha <= 1)):
        raise ValueError('values for alpha must be in the interval [0, 1]')

    # get the current information density
    bandwidth_area = get_bandwidth_area(sicd)
    signal = get_default_signal_estimate(sicd)  # NB: this is just 1 or 0.25, no scaling issues
    current_nesz = get_sigma0_noise(sicd)
    snr = signal/current_nesz
    current_inf_density = get_information_density(bandwidth_area, signal, current_nesz)

    if desired_information_density is not None:
        desired_information_density = float(desired_information_density)
    elif desired_rniirs is not None:
        desired_information_density = get_information_density_for_rniirs(float(desired_rniirs))
    if desired_information_density > current_inf_density:
        raise ValueError(
            'The desired information density is {},\n\t'
            'but the current deweighted information density is {}'.format(
                desired_information_density, current_inf_density))

    aperture_size, bw_multiplier = get_bidirectional_bandwidth_multiplier_possibilities(sicd)
    # construct the whole list of bandwidth areas and resulting noises after
    # subaperture degrading and deweighting
    bw_areas = bandwidth_area*numpy.multiply.reduce(bw_multiplier, 1)
    inf_densities = get_information_density(bw_areas, signal, current_nesz)
    if desired_information_density < inf_densities[-1]:
        raise ValueError(
            'The desired information density is {},\n\t'
            'but the minimum possible with pure subaperture degradation is {}'.format(
                desired_information_density, inf_densities[-1]))

    # the feasible aperture closest to achieving the desired density by bandwidth
    # reduction alone (the alpha=0 end of the distribution)
    best_index = numpy.argmin((desired_information_density - inf_densities)**2)
    # NB: numpy.cast was deprecated in numpy 1.25 and removed in numpy 2.0,
    # astype performs the same truncating conversion
    indices = (best_index - alpha*best_index).astype('int32')
    indices = numpy.clip(indices, 0, best_index)

    this_bw_areas = bw_areas[indices]
    # NB: inf_dens = bw_area*log2(1 + snr/mult))
    #     snr/mult = 2^(inf_dens/bw_area) - 1
    #     mult = snr/(2^(inf_dens/bw_area) - 1))
    required_noise_multiplier = snr/(numpy.exp2(desired_information_density/this_bw_areas) - 1)
    # never remove noise - the multiplier is bounded below by 1
    required_noise_multiplier[required_noise_multiplier < 1] = 1
    bw_mult_out = numpy.empty(required_noise_multiplier.shape + (2, ), dtype='float64')
    # ellipsis indexing handles multi-dimensional alpha input correctly
    bw_mult_out[..., 0] = bw_multiplier[indices, 0]
    bw_mult_out[..., 1] = bw_multiplier[indices, 1]
    if orig_ndim == 0:
        return (float(bw_mult_out[0, 0]), float(bw_mult_out[0, 1])), float(required_noise_multiplier[0])
    return bw_mult_out, required_noise_multiplier
#########################
# helpers for quality degradation function
def _get_uniform_weight_dicts(
        sicd: SICDType) -> Tuple[Optional[Dict], Optional[Dict]]:
    """
    Gets the dictionaries denoting uniform weighting.

    Parameters
    ----------
    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType

    Returns
    -------
    row_weighting : None|dict
    column_weighting : None|dict
    """

    def make_uniform():
        # fresh dict/array per call, so the two returns never share state
        return {'WindowName': 'UNIFORM', 'WgtFunct': numpy.ones((32,), dtype='float64')}

    row_weighting = None if is_uniform_weight(sicd, 0) else make_uniform()
    column_weighting = None if is_uniform_weight(sicd, 1) else make_uniform()
    return row_weighting, column_weighting
def _validate_reader(
        reader: Union[str, SICDTypeReader],
        index: int) -> Tuple[SICDTypeReader, int]:
    """
    Validate the method input:

    Parameters
    ----------
    reader : str|SICDTypeReader
    index : int
        The reader index.

    Returns
    -------
    reader: SICDTypeReader
    index: int
    """

    # accept a path, and open it as a complex reader
    the_reader = open_complex(reader) if isinstance(reader, str) else reader
    if not isinstance(the_reader, SICDTypeReader):
        raise TypeError('reader input must be a path to a complex file, or a sicd type reader instance')

    the_index = int(index)
    if not (0 <= the_index < the_reader.image_count):
        raise ValueError('index must be between 0 and {}, got {}'.format(the_reader.image_count, the_index))
    return the_reader, the_index
def _map_desired_resolution_to_aperture(
current_imp_resp_bw: float,
sample_size: float,
direction: str,
direction_size: int,
desired_resolution: Optional[float] = None,
desired_bandwidth: Optional[float] = None,
broadening_factor: Optional[float] = None) -> Tuple[Optional[Tuple[int, int]], float]:
"""
Determine the appropriate symmetric subaperture range to achieve the desired
bandwidth or resolution, assuming the given broadening factor.
Parameters
----------
current_imp_resp_bw : float
sample_size : float
direction : str
direction_size : int
The size of the array along the given direction.
desired_resolution : None|float
The desired ImpRespWid (Row, Col) tuple, which will be mapped to ImpRespBW
assuming uniform weighting. Exactly one of `desired_resolution` and
`desired_bandwidth` must be provided.
desired_bandwidth : None|float
The desired ImpRespBW. Exactly one of `desired_resolution`
and `desired_bandwidth` must be provided.
broadening_factor : None|float
The only applies if `desired_resolution` is provided. If not provided,
then UNIFORM weighting will be assumed.
Returns
-------
indices: None|Tuple[int, int]
bw_factor : float
"""
if desired_resolution is None and desired_bandwidth is None:
raise ValueError('One of desire_resolution or desired_bandwidth must be supplied.')
if desired_resolution is not None:
if broadening_factor is None:
broadening_factor = get_hamming_broadening_factor(1.0)
else:
broadening_factor = float(broadening_factor)
use_resolution = float(desired_resolution)
use_bandwidth = broadening_factor/use_resolution
else:
use_bandwidth = float(desired_bandwidth)
if use_bandwidth > current_imp_resp_bw:
if desired_resolution is not None:
raise ValueError(
'After mapping from Desired {} ImpRespWid considering uniform weighting,\n\t'
'the equivalent desired ImpRespBW is {},\n\t'
'but the current ImpRespBW is {}'.format(direction, use_bandwidth, current_imp_resp_bw))
else:
raise ValueError(
'Desired {} ImpRespBW is given as {},\n\t'
'but the current ImpRespBW is {}'.format(direction, use_bandwidth, current_imp_resp_bw))
elif use_bandwidth == current_imp_resp_bw:
return None, 1.0
else:
oversample = max(1., 1./(sample_size*use_bandwidth))
ap_size = round(direction_size/oversample)
start_ind = int(numpy.floor(0.5*(direction_size - ap_size)))
return (start_ind, start_ind+ap_size), use_bandwidth/current_imp_resp_bw
def _map_bandwidth_parameters(
sicd: SICDType,
desired_resolution: Optional[Tuple[float, float]] = None,
desired_bandwidth: Optional[Tuple[float, float]] = None
) -> Tuple[Tuple[int, int], float, Tuple[int, int], float]:
"""
Helper function to map desired resolution or bandwidth to the suitable (centered)
aperture.
Parameters
----------
sicd : SICDType
desired_resolution : None|Tuple[float, float]
desired_bandwidth : None|Tuple[float, float]
Returns
-------
row_aperture : Tuple[int, int]
row_bw_factor : float
column_aperture : Tuple[int, int]
column_bw_factor : float
"""
if desired_resolution is not None:
# get the broadening factor for uniform weighting
broadening_factor = get_hamming_broadening_factor(1.0)
row_aperture, row_bw_factor = _map_desired_resolution_to_aperture(
sicd.Grid.Row.ImpRespBW, sicd.Grid.Row.SS, 'Row', sicd.ImageData.NumRows,
desired_resolution=desired_resolution[0], broadening_factor=broadening_factor)
column_aperture, column_bw_factor = _map_desired_resolution_to_aperture(
sicd.Grid.Col.ImpRespBW, sicd.Grid.Col.SS, 'Col', sicd.ImageData.NumCols,
desired_resolution=desired_resolution[1], broadening_factor=broadening_factor)
elif desired_bandwidth is not None:
row_aperture, row_bw_factor = _map_desired_resolution_to_aperture(
sicd.Grid.Row.ImpRespBW, sicd.Grid.Row.SS, 'Row', sicd.ImageData.NumRows,
desired_bandwidth=desired_bandwidth[0])
column_aperture, column_bw_factor = _map_desired_resolution_to_aperture(
sicd.Grid.Col.ImpRespBW, sicd.Grid.Col.SS, 'Col', sicd.ImageData.NumCols,
desired_bandwidth=desired_bandwidth[1])
else:
row_aperture, row_bw_factor = None, 1
column_aperture, column_bw_factor = None, 1
return row_aperture, row_bw_factor, column_aperture, column_bw_factor
def get_dimension_bandwidth_multiplier_possibilities(
sicd: SICDType,
dimension: int) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Gets the bandwidth possibilities for all centered subapertures along the given
dimension.
*Introduced in 1.2.35*
Parameters
----------
sicd : SICDType
dimension : int
One of `{0, 1}`.
Returns
-------
aperture_size : numpy.ndarray
Of shape `(N, )`
bandwidth_multiplier : numpy.ndarray
Of shape `(N, )`
"""
if dimension == 0:
ap_size = round(sicd.ImageData.NumRows / sicd.Grid.Row.get_oversample_rate())
else:
ap_size = round(sicd.ImageData.NumCols / sicd.Grid.Col.get_oversample_rate())
aperture_size = numpy.arange(ap_size, 0, -1, dtype='int32')
bandwidth_multiplier = aperture_size/float(ap_size)
return aperture_size, bandwidth_multiplier
def get_bidirectional_bandwidth_multiplier_possibilities(
        sicd: SICDType) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Gets the bandwidth possibilities for all centered subapertures shrinking
    along both dimensions symmetrically.

    *Introduced in 1.2.35*

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    aperture_size : numpy.ndarray
        An array of shape `(N, 2)` for row/column separately.
    bandwidth_multiplier : numpy.ndarray
        An array of shape `(N, 2)` for row/column separately.
    """

    row_aperture_size, row_bw_multiplier = get_dimension_bandwidth_multiplier_possibilities(sicd, 0)
    col_aperture_size, col_bw_multiplier = get_dimension_bandwidth_multiplier_possibilities(sicd, 1)
    # use the finer of the two collections of possibilities for both dimensions
    the_size = max(row_aperture_size.size, col_aperture_size.size)
    aperture_size = numpy.empty((the_size, 2), dtype='int32')
    bandwidth_multiplier = numpy.empty((the_size, 2), dtype='float64')

    def spread_indices(source_size: int) -> numpy.ndarray:
        # monotonically spread the common sample count over [0, source_size)
        if the_size == 1:
            # avoid division by zero in the degenerate single-sample case
            return numpy.zeros((1, ), dtype='int32')
        fractional = numpy.arange(the_size)/float(the_size - 1)
        # NB: numpy.cast was removed in numpy 2.0, use astype instead
        return numpy.ceil(float(source_size - 1)*fractional).astype('int32')

    row_indexing = spread_indices(row_aperture_size.size)
    col_indexing = spread_indices(col_aperture_size.size)
    aperture_size[:, 0] = row_aperture_size[row_indexing]
    aperture_size[:, 1] = col_aperture_size[col_indexing]
    bandwidth_multiplier[:, 0] = row_bw_multiplier[row_indexing]
    bandwidth_multiplier[:, 1] = col_bw_multiplier[col_indexing]
    return aperture_size, bandwidth_multiplier
#########################
# SICD quality degradation functions
def quality_degrade(
        reader: Union[str, SICDTypeReader],
        index: int = 0,
        output_file: Optional[str] = None,
        desired_resolution: Optional[Tuple[float, float]] = None,
        desired_bandwidth: Optional[Tuple[float, float]] = None,
        desired_nesz: Optional[float] = None,
        **kwargs) -> Optional[FlatSICDReader]:
    r"""
    Create a degraded quality SICD based on the desired resolution (impulse response width)
    or bandwidth (impulse response bandwidth), and the desired Noise Equivalent
    Sigma Zero value. The produced SICD will have **uniform weighting**.

    At most one of `desired_resolution` and `desired_bandwidth` may be provided.
    If none of `desired_resolution`, `desired_bandwidth`, or `desired_nesz` are
    provided, then the SICD will be re-weighted with uniform weighting - even
    this will change the noise and RNIIRS values slightly.

    .. warning::
        Unless `desired_nesz=None`, this will fail for a SICD which is not fully
        Radiometrically calibrated with `'ABSOLUTE'` noise type.

    Parameters
    ----------
    reader : str|SICDTypeReader
    index : int
        The reader index to be used.
    output_file : None|str
        If `None`, an in-memory SICD reader instance will be returned. Otherwise,
        this is the path for the produced output SICD file.
    desired_resolution : None|tuple
        The desired ImpRespWid (Row, Col) tuple, which will be mapped to ImpRespBW
        assuming uniform weighting. You cannot provide both `desired_resolution`
        and `desired_bandwidth`.
    desired_bandwidth : None|tuple
        The desired ImpRespBW (Row, Col) tuple. You cannot provide both
        `desired_resolution` and `desired_bandwidth`.
    desired_nesz : None|float
        The desired Noise Equivalent Sigma Zero value in power units, this is after
        modifications which change the noise due to sub-aperture degradation and/or
        de-weighting.
    kwargs
        Keyword arguments passed through to :func:`sarpy.processing.sicd.normalize_sicd.sicd_degrade_reweight`

    Returns
    -------
    None|FlatSICDReader
        No return if `output_file` is provided, otherwise the returns the in-memory
        reader object.
    """

    reader, index = _validate_reader(reader, index)
    if desired_resolution is not None and desired_bandwidth is not None:
        raise ValueError('Both desired_resolution and desired_bandwidth cannot be supplied.')

    sicd = reader.get_sicds_as_tuple()[index]

    # determine how much noise (if any) must be added to realize the desired nesz
    add_noise = None
    if desired_nesz is not None:
        current_nesz = get_sigma0_noise(sicd)
        relative_increase = (desired_nesz - current_nesz)/current_nesz
        if abs(relative_increase) < 1e-5:
            # effectively no change requested
            add_noise = None
        elif relative_increase < 0:
            # noise can only be added, never removed
            raise ValueError(
                'The current nesz value is {},\n\t'
                'the desired nesz value of {} cannot be achieved.'.format(current_nesz, desired_nesz))
        else:
            # current noise power comes from the constant (dB) noise polynomial term
            add_noise = numpy.exp(numpy.log(10)*0.1*sicd.Radiometric.NoiseLevel.NoisePoly[0, 0])*relative_increase

    row_aperture, row_bw_factor, column_aperture, column_bw_factor = _map_bandwidth_parameters(
        sicd, desired_resolution=desired_resolution, desired_bandwidth=desired_bandwidth)
    row_weighting, column_weighting = _get_uniform_weight_dicts(sicd)
    return sicd_degrade_reweight(
        reader, output_file=output_file, index=index,
        row_aperture=row_aperture, row_weighting=row_weighting,
        column_aperture=column_aperture, column_weighting=column_weighting,
        add_noise=add_noise, **kwargs)
def quality_degrade_resolution(
        reader: Union[str, SICDTypeReader],
        index: int = 0,
        output_file: Optional[str] = None,
        desired_resolution: Optional[Tuple[float, float]] = None,
        desired_bandwidth: Optional[Tuple[float, float]] = None,
        **kwargs) -> Optional[FlatSICDReader]:
    """
    Create a degraded quality SICD based on INCREASING the impulse response width
    to the desired resolution or DECREASING the impulse response bandwidth to the
    desired bandwidth. The produced SICD will have uniform weighting.

    Parameters
    ----------
    reader : str|SICDTypeReader
    index : int
        The reader index to be used.
    output_file : None|str
        If `None`, an in-memory SICD reader instance will be returned. Otherwise,
        this is the path for the produced output SICD file.
    desired_resolution : None|tuple
        The desired ImpRespWid (Row, Col) tuple, which will be mapped to ImpRespBW
        assuming uniform weighting. Exactly one of `desired_resolution` and
        `desired_bandwidth` must be provided.
    desired_bandwidth : None|tuple
        The desired ImpRespBW (Row, Col) tuple. Exactly one of `desired_resolution`
        and `desired_bandwidth` must be provided.
    kwargs
        Keyword arguments passed through to :func:`sarpy.processing.sicd.normalize_sicd.sicd_degrade_reweight`

    Returns
    -------
    None|FlatSICDReader
        No return if `output_file` is provided, otherwise the returns the in-memory
        reader object.
    """

    # thin convenience wrapper - validation and processing are performed by quality_degrade
    return quality_degrade(
        reader, index=index, output_file=output_file,
        desired_resolution=desired_resolution,
        desired_bandwidth=desired_bandwidth, **kwargs)
def quality_degrade_noise(
        reader: Union[str, SICDTypeReader],
        index: int = 0,
        output_file: Optional[str] = None,
        desired_nesz: Optional[float] = None,
        **kwargs) -> Optional[FlatSICDReader]:
    """
    Create a degraded quality SICD based on INCREASING the noise to the desired
    Noise Equivalent Sigma Zero value. The produced SICD will have uniform weighting.

    .. warning::
        This will fail for a SICD which is not fully Radiometrically calibrated,
        with ABSOLUTE noise type.

    Parameters
    ----------
    reader : str|SICDTypeReader
    index : int
        The reader index to be used.
    output_file : None|str
        If `None`, an in-memory SICD reader instance will be returned. Otherwise,
        this is the path for the produced output SICD file.
    desired_nesz : None|float
        The desired noise equivalent sigma zero value.
    kwargs
        Keyword arguments passed through to :func:`sarpy.processing.sicd.normalize_sicd.sicd_degrade_reweight`

    Returns
    -------
    None|FlatSICDReader
        No return if `output_file` is provided, otherwise the returns the in-memory
        reader object.
    """

    # thin convenience wrapper - validation and processing are performed by quality_degrade
    return quality_degrade(
        reader, index=index, output_file=output_file,
        desired_nesz=desired_nesz, **kwargs)
def quality_degrade_rniirs(
        reader: Union[str, SICDTypeReader],
        index: int = 0,
        output_file: Optional[str] = None,
        desired_rniirs: Optional[float] = None,
        alpha: float = 0,
        **kwargs) -> Optional[FlatSICDReader]:
    r"""
    Create a degraded quality SICD based on the desired estimated RNIIRS value.
    The produced SICD will have uniform weighting.

    The sicd degradation will be performed as follows:

    - The current information density/current rniirs **with respect to uniform weighting**
      will be found.
    - The information density required to produce the desired rniirs will be found.
    - This, along with the :math:`\alpha` value, will be used in
      :func:`get_bandwidth_noise_distribution` to determine the best feasible
      multipliers for bandwidth and noise values.
    - These desired bandwidth and noise values will then be used in conjunction
      with :func:`sicd_degrade_reweight`.

    .. warning::
        This will fail for a SICD which is not fully Radiometrically calibrated,
        with `'ABSOLUTE'` noise type.

    Parameters
    ----------
    reader : str|SICDTypeReader
    index : int
        The reader index to be used.
    output_file : None|str
        If `None`, an in-memory SICD reader instance will be returned. Otherwise,
        this is the path for the produced output SICD file.
    desired_rniirs : None|float
        The desired rniirs value, according to the RGIQE methodology. If `None`,
        this simply delegates to :func:`quality_degrade` (uniform re-weighting only).
    alpha : float
        This must be a number in the interval [0, 1] defining the (geometric)
        distribution of variability between required influence from increasing
        noise and required influence of decreasing bandwidth.
    kwargs
        Keyword arguments passed through to :func:`sarpy.processing.sicd.normalize_sicd.sicd_degrade_reweight`

    Returns
    -------
    None|FlatSICDReader
        No return if `output_file` is provided, otherwise the returns the in-memory
        reader object.
    """

    if desired_rniirs is None:
        # no rniirs target given - just perform the uniform re-weighting
        return quality_degrade(reader, index=index, output_file=output_file, **kwargs)

    reader, index = _validate_reader(reader, index)
    sicd = reader.get_sicds_as_tuple()[index]
    # current noise power from the constant (dB) term of the noise polynomial
    current_noise = numpy.exp(numpy.log(10)*0.1*sicd.Radiometric.NoiseLevel.NoisePoly[0, 0])
    # find the feasible (row, col) bandwidth multipliers and the noise multiplier
    # which realize the desired rniirs under the given alpha distribution
    bandwidth_multiplier, noise_multiplier = get_bandwidth_noise_distribution(
        sicd, alpha, desired_rniirs=desired_rniirs)
    desired_bandwidth = (
        sicd.Grid.Row.ImpRespBW*bandwidth_multiplier[0],
        sicd.Grid.Col.ImpRespBW*bandwidth_multiplier[1])
    add_noise = (noise_multiplier - 1)*current_noise
    if alpha == 0 or add_noise <= 0:
        # alpha == 0 requests a bandwidth-only degradation, and non-positive
        # additional noise cannot be applied
        add_noise = None
    # map the desired bandwidth to centered (symmetric) sub-apertures
    row_aperture, row_bw_factor, column_aperture, column_bw_factor = _map_bandwidth_parameters(
        sicd, desired_bandwidth=desired_bandwidth)
    row_weighting, column_weighting = _get_uniform_weight_dicts(sicd)
    return sicd_degrade_reweight(
        reader, output_file=output_file, index=index,
        row_aperture=row_aperture, row_weighting=row_weighting,
        column_aperture=column_aperture, column_weighting=column_weighting,
        add_noise=add_noise, **kwargs)
| 34,889 | 34.385396 | 113 | py |
sarpy | sarpy-master/sarpy/processing/sicd/ccd.py | """
The module contains methods for computing a coherent change detection from registered images
"""
from typing import Union, Tuple
import numpy
import scipy.signal
__classification__ = "UNCLASSIFIED"
__author__ = ('Thomas Mccullough', 'Wade Schwartzkopf', 'Mike Dowell')
def mem(
        reference_image: numpy.ndarray,
        match_image: numpy.ndarray,
        corr_window_size: Union[int, Tuple[int, int]]) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Performs coherent change detection, following the equation as described in
    Jakowatz, et al., "Spotlight-mode Synthetic Aperture radar: A Signal
    Processing Approach".

    .. warning: This assumes that the two arrays have already been properly
        registered with respect to one another, and all processing will proceed
        directly in memory.

    Parameters
    ----------
    reference_image : numpy.ndarray
    match_image : numpy.ndarray
    corr_window_size : int|tuple
        The correlation window size. If int, a square correlation window of
        given size will be used. If tuple, it must be a two element tuple of
        ints which describe the correlation window size.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        The ccd and phase arrays. The ccd is 0 wherever either windowed
        magnitude vanishes.
    """

    if isinstance(corr_window_size, int):
        kernel = numpy.ones((corr_window_size, corr_window_size), dtype=numpy.float32)
    elif isinstance(corr_window_size, tuple) and len(corr_window_size) == 2:
        kernel = numpy.ones(corr_window_size, dtype=numpy.float32)
    else:
        raise TypeError('corr_window_size is required to be an int or two element tuple of ints')

    def _windowed_magnitude(image: numpy.ndarray) -> numpy.ndarray:
        # magnitude of the smeared image, clipping small negative numerical noise
        power = scipy.signal.convolve2d(
            image.real*image.real + image.imag*image.imag, kernel, mode='same')
        power[power < 0] = 0
        return numpy.sqrt(power)

    inner_product = scipy.signal.convolve2d(
        numpy.conj(reference_image)*match_image, kernel, mode='same')
    denominator = _windowed_magnitude(reference_image)*_windowed_magnitude(match_image)

    # perform the ccd calculation - masked numpy.divide avoids the divide-by-zero
    # warnings and transient inf/nan values that evaluating both numpy.where
    # branches would produce; zero-denominator pixels are left at 0
    ccd = numpy.zeros(inner_product.shape, dtype=inner_product.dtype)
    numpy.divide(inner_product, denominator, out=ccd, where=(denominator > 0))
    phase = numpy.angle(inner_product)
    return ccd, phase
| 2,535 | 36.850746 | 109 | py |
sarpy | sarpy-master/sarpy/processing/sicd/windows.py | """
Window function definitions and a few helper functions. This just passes through
to scipy functions after managing scipy version dependent import structure.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Union, Optional
import scipy
import numpy
from scipy.optimize import newton
# parse the installed scipy version into a comparable (major, minor) tuple
_version_string_parts = scipy.__version__.split('.')
_version = (int(_version_string_parts[0]), int(_version_string_parts[1]))

############
# some basic window function definitions

# scipy 1.1 introduced scipy.signal.windows.general_hamming and moved the
# window functions into the windows submodule
if _version >= (1, 1):
    # noinspection PyUnresolvedReferences
    from scipy.signal.windows import general_hamming as _general_hamming, \
        kaiser as _kaiser
else:
    # no general_hamming available - general_hamming() below falls back to
    # its own construction when this is None
    _general_hamming = None
    # noinspection PyUnresolvedReferences
    from scipy.signal import kaiser as _kaiser

# scipy 1.6 introduced scipy.signal.windows.taylor
if _version >= (1, 6):
    # noinspection PyUnresolvedReferences
    from scipy.signal.windows import taylor as _taylor
else:
    # taylor() below falls back to its own construction when this is None
    _taylor = None
def general_hamming(
        M: int,
        alpha: float,
        sym: bool = True) -> numpy.ndarray:
    r"""
    Returns a generalized hamming function. Constructed (non-symmetric) as

    :math:`\alpha - (1-\alpha)\cos\left(\frac{2\pi n}{M-1}\right) 0\leq n \leq M-1`

    Parameters
    ----------
    M : int
        Number of points in the output window.
    alpha : float
        The window coefficient.
    sym : bool
        When `True` (default), generates a symmetric window, for use in filter
        design. When `False`, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    numpy.ndarray
    """

    if _general_hamming is not None:
        # scipy 1.1 and newer provides this window directly
        return _general_hamming(M, alpha, sym=sym)

    # fall-back construction for scipy < 1.1
    # number of samples in the first half of the window
    if (M % 2) == 0:
        k = int(M / 2)
    else:
        k = int((M + 1) / 2)
    theta = 2 * numpy.pi * numpy.arange(k) / (M - 1)
    weights = numpy.zeros((M,), dtype=numpy.float64)
    if sym:
        # evaluate the first half directly, then mirror for symmetry
        weights[:k] = (alpha - (1 - alpha) * numpy.cos(theta))
        weights[k:] = weights[k - 1::-1]
    else:
        # NOTE(review): this periodic branch reverses the first half before
        # mirroring, which differs in shape from the symmetric branch - verify
        # against scipy.signal.windows.general_hamming(M, alpha, sym=False)
        weights[:k] = (alpha - (1 - alpha) * numpy.cos(theta))[::-1]
        weights[k:] = weights[k - 1::-1]
    return weights
def hamming(
        M: int,
        sym: bool = True) -> numpy.ndarray:
    r"""
    The classic Hamming window - a generalized hamming window with the standard
    coefficient :math:`\alpha = 0.54`.

    Parameters
    ----------
    M : int
        Number of points in the output window.
    sym : bool
        When `True` (default), generates a symmetric window, for use in filter
        design. When `False`, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    numpy.ndarray
    """

    hamming_coefficient = 0.54
    return general_hamming(M, hamming_coefficient, sym=sym)
def hanning(
        M: int,
        sym: bool = True) -> numpy.ndarray:
    r"""
    The hanning (or hann) window - a generalized hamming window with
    coefficient :math:`\alpha = 0.5`.

    Parameters
    ----------
    M : int
        Number of points in the output window.
    sym : bool
        When `True` (default), generates a symmetric window, for use in filter
        design. When `False`, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    numpy.ndarray
    """

    hann_coefficient = 0.5
    return general_hamming(M, hann_coefficient, sym=sym)
def taylor(
        M: int,
        nbar: int = 4,
        sll: float = -30,
        norm: bool = True,
        sym: bool = True) -> numpy.ndarray:
    """
    The Taylor window taper function approximates the Dolph-Chebyshev windows
    constant sidelobe level for a parameterized number of near-in sidelobes,
    but then allows a taper beyond.

    The SAR (synthetic aperature radar) community commonly uses Taylor weighting
    for image formation processing because it provides strong, selectable sidelobe
    suppression with minimum broadening of the mainlobe.

    Parameters
    ----------
    M : int
        Number of points in the output window.
    nbar : int
        Number of nearly constant level sidelobes adjacent to the mainlobe.
    sll : float
        Desired suppression of sidelobe level in decibels (dB) relative to the
        DC gain of the mainlobe. Either sign is accepted and normalized below.
    norm : bool
        When `True` (default), divides the window by the largest (middle) value
        for odd-length windows or the value that would occur between the two
        repeated middle values for even-length windows such that all values are
        less than or equal to 1. When `False` the DC gain will remain at 1 (0 dB)
        and the sidelobes will be sll dB down.
    sym : bool
        When `True` (default), generates a symmetric window, for use in filter
        design. When `False`, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    numpy.ndarray
    """

    if _taylor is not None:
        # scipy 1.6 and newer provides this window; it expects a positive sll
        if sll < 0:
            sll *= -1
        return _taylor(M, nbar=nbar, sll=sll, norm=norm, sym=sym)

    # fall-back construction for scipy < 1.6; the formulas below use a negative sll
    if sll > 0:
        sll *= -1
    a = numpy.arccosh(10**(-sll/20.))/numpy.pi
    # Taylor pulse widening (dilation) factor
    sp2 = (nbar*nbar)/(a*a + (nbar-0.5)*(nbar-0.5))
    # the angular space in n points
    xi = numpy.linspace(-numpy.pi, numpy.pi, M)
    # calculate the cosine weights
    out = numpy.ones((M, ), dtype=numpy.float64)  # the "constant" term
    max_value = 1.0
    coefs = numpy.arange(1, nbar)
    sgn = 1
    # accumulate the alternating-sign cosine series of the Taylor expansion;
    # max_value tracks the peak for the optional normalization below
    for m in coefs:
        coefs1 = (coefs - 0.5)
        coefs2 = coefs[coefs != m]
        numerator = numpy.prod(1 - (m*m)/(sp2*(a*a + coefs1*coefs1)))
        denominator = numpy.prod(1 - (m*m)/(coefs2*coefs2))
        out += sgn*(numerator/denominator)*numpy.cos(m*xi)
        max_value += sgn*(numerator/denominator)
        sgn *= -1
    if not sym:
        # rotate halves to obtain the periodic form
        k = int(M/2)
        l = M-k
        out2 = numpy.empty((M, ), dtype='float64')
        out2[:k] = out[l:]
        out2[k:] = out[:l]
        out = out2
    if norm:
        out /= max_value
    return out
def kaiser(
        M: int,
        beta: float,
        sym: bool = True) -> numpy.ndarray:
    """
    Return a Kaiser window - a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window.
    beta : float
        Shape parameter, determines trade-off between main-lobe width and side
        lobe level. As beta gets large, the window narrows.
    sym : bool
        When `True` (default), generates a symmetric window, for use in filter
        design. When `False`, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    numpy.ndarray
    """

    # delegate to the scipy implementation - the import location is version
    # dependent, and was resolved at module load time
    return _kaiser(M, beta, sym=sym)
#################
# helper methods
def hamming_ipr(
        x: Union[numpy.ndarray, float],
        a: float) -> Union[numpy.ndarray, float]:
    """
    Evaluate the Hamming impulse response, shifted down by the half power
    amplitude level, at the given point(s). The root of this function is the
    half power point of the impulse response.

    Parameters
    ----------
    x : numpy.ndarray|float|int
    a : float
        The Hamming parameter value.

    Returns
    -------
    numpy.ndarray
    """

    main_lobe = a*numpy.sinc(x)
    side_terms = 0.5*(1 - a)*(numpy.sinc(x - 1) + numpy.sinc(x + 1))
    # the peak value (at x=0) is a, so a/sqrt(2) is the half power level
    return main_lobe + side_terms - a/numpy.sqrt(2)
def get_hamming_broadening_factor(coef: float) -> float:
    """
    Find the full half-power width of the hamming impulse response for the
    given coefficient, by locating the root of :func:`hamming_ipr` (which is
    the half-power point) and doubling it.

    Parameters
    ----------
    coef : float
        The generalized hamming window coefficient.

    Returns
    -------
    float
    """

    # coarse grid search for a good starting point for the root finding
    test_array = numpy.linspace(0.3, 2.5, 100)
    values = hamming_ipr(test_array, coef)
    init_value = test_array[numpy.argmin(numpy.abs(values))]
    # refine with Newton's method
    zero = newton(hamming_ipr, init_value, args=(coef,), tol=1e-12, maxiter=100)
    return 2 * zero
def find_half_power(
        wgt_funct: Optional[numpy.ndarray],
        oversample: int = 1024) -> Optional[float]:
    """
    Find the full half-power width of the impulse response associated with the
    given weight function, in units of (pre-oversample) bins.

    Parameters
    ----------
    wgt_funct : None|numpy.ndarray
    oversample : int

    Returns
    -------
    None|float
    """

    if wgt_funct is None:
        return None

    # oversampled impulse response amplitude, normalized to unit peak at DC
    impulse_response = numpy.abs(
        numpy.fft.fft(wgt_funct, wgt_funct.size*oversample))/numpy.sum(wgt_funct)
    half_power = 1/numpy.sqrt(2)
    # locate the first sample which falls below the half power level
    ind = numpy.flatnonzero(impulse_response < half_power)[0]
    # linearly interpolate between the bracketing samples for the crossing
    above = impulse_response[ind - 1]
    below = impulse_response[ind]
    crossing = (ind - 1) + (half_power - above)/(below - above)
    # double for the full width, and undo the oversampling
    return 2*crossing/oversample
| 8,269 | 27.419244 | 106 | py |
sarpy | sarpy-master/sarpy/processing/sicd/csi.py | """
The methods for computing a color sub-aperture image for SICD type images.
As noted in the CSICalculator class, the full resolution data along the split dimension
is required, so sub-sampling along the split dimension does not decrease the amount of
data which must be fetched and/or processing which must be performed.
Examples
--------
.. code-block:: python
from matplotlib import pyplot
from sarpy.io.complex.converter import open_complex
from sarpy.processing.sicd.csi import CSICalculator
from sarpy.visualization.remap import Density
# open a sicd type file
reader = open_complex("<file name>")
# see the sizes of all image segments
print(reader.get_data_size_as_tuple())
# construct the csi performer instance
# make sure to set the index and dimension as appropriate
csi_calculator = CSICalculator(reader, dimension=0, index=0)
# see the size for this particular image element
# this is identical to the data size from the reader at index
print(csi_calculator.data_size)
# set a different index or change the dimension
# csi_calculator.index = 2
# csi_calculator.dimension = 1
# calculate the csi for an image segment
csi_data = csi_calculator[300:500, 200:600]
# create our remap function
density = Density()
# let's view this csi image using matplotlib
fig, axs = pyplot.subplots(nrows=1, ncols=1)
axs.imshow(density(csi_data), aspect='equal')
pyplot.show()
"""
__classification__ = "UNCLASSIFIED"
__author__ = 'Thomas McCullough'
from typing import Union, Tuple, Optional, Sequence
import numpy
from sarpy.processing.sicd.fft_base import FFTCalculator, fft, ifft, fftshift
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.utils import get_fetch_block_size
from sarpy.io.general.slice_parsing import get_slice_result_size
def filter_map_construction(siz: Union[int, float]) -> numpy.ndarray:
    """
    Provides the RGB filter array for sub-aperture processing.

    Parameters
    ----------
    siz : int|float
        the size of the colormap

    Returns
    -------
    numpy.ndarray
        the `siz x 3` colormap array

    Raises
    ------
    ValueError
        if `siz` rounds to fewer than 4 elements
    """

    siz = int(round(siz))
    # NB: the previous check (siz < 1) permitted sizes 1-3 despite the message,
    # and siz == 1 raised an IndexError in the indexing below; enforce the
    # minimum of 4 which the error message has always advertised
    if siz < 4:
        raise ValueError('Cannot create the filter map with fewer than 4 elements.')

    basic_size = int(numpy.ceil(0.25*siz))
    # create trapezoidal band profile - ramp up, flat top, ramp down
    trapezoid = numpy.hstack(
        (numpy.arange(1, basic_size+1, dtype=numpy.int32),
         numpy.full((basic_size-1, ), basic_size, dtype=numpy.int32),
         numpy.arange(basic_size, 0, -1, dtype=numpy.int32)))/float(basic_size)
    out = numpy.zeros((siz, 3), dtype=numpy.float64)
    # center the green band, and offset red/blue symmetrically (with wraparound)
    green_inds = int(round(0.5*(siz - trapezoid.size))) + numpy.arange(trapezoid.size)
    red_inds = ((green_inds + basic_size) % siz)
    blue_inds = ((green_inds - basic_size) % siz)
    # populate our array
    out[red_inds, 0] = trapezoid
    out[green_inds, 1] = trapezoid
    out[blue_inds, 2] = trapezoid
    return out
def csi_array(
        array: numpy.ndarray,
        dimension: int = 0,
        platform_direction: str = 'R',
        fill: Union[int, float] = 1,
        filter_map: Optional[numpy.ndarray] = None) -> numpy.ndarray:
    """
    Creates a color subaperture array from a complex array.

    .. Note: this ignores any potential sign issues for the fft and ifft, because
        the results would be identical - fft followed by ifft versus ifft followed
        by fft.

    Parameters
    ----------
    array : numpy.ndarray
        The complex valued SAR data, assumed to be in the "image" domain.
        Required to be two-dimensional.
    dimension : int
        The dimension over which to split the sub-aperture.
    platform_direction : str
        The (case insensitive) platform direction, required to be one of `('R', 'L')`.
    fill : float
        The fill factor. This will be ignored if `filter_map` is provided.
    filter_map : None|numpy.ndarray
        The RGB filter mapping. This is assumed constructed using :func:`filter_map_construction`.

    Returns
    -------
    numpy.ndarray
        The real valued `rows x cols x 3` CSI array.
    """

    if not (isinstance(array, numpy.ndarray) and len(array.shape) == 2 and numpy.iscomplexobj(array)):
        raise ValueError('array must be a two-dimensional numpy array of complex dtype')

    dimension = int(dimension)
    if dimension not in [0, 1]:
        raise ValueError('dimension must be 0 or 1, got {}'.format(dimension))
    if dimension == 0:
        # orient the split dimension as the final axis for processing
        array = array.T

    pdir_func = platform_direction.upper()[0]
    if pdir_func not in ['R', 'L']:
        raise ValueError('It is expected that pdir is one of "R" or "L". Got {}'.format(platform_direction))

    # get our filter construction data
    if filter_map is None:
        fill = max(1.0, float(fill))
        filter_map = filter_map_construction(array.shape[1]/fill)
    if not (isinstance(filter_map, numpy.ndarray) and
            filter_map.dtype.name in ['float32', 'float64'] and
            filter_map.ndim == 2 and filter_map.shape[1] == 3):
        raise ValueError('filter_map must be a N x 3 numpy array of float dtype.')

    # move to the phase history domain, keeping the centered filter-sized portion
    ph_indices = int(numpy.floor(0.5*(array.shape[1] - filter_map.shape[0]))) + \
        numpy.arange(filter_map.shape[0], dtype=numpy.int32)
    # NB: numpy.cast was deprecated in numpy 1.25 and removed in numpy 2.0,
    # so promote to complex128 using astype instead
    ph0 = fftshift(ifft(array.astype(numpy.complex128), axis=1), axes=1)[:, ph_indices]
    # construct the filtered workspace
    # NB: processing is more efficient with color band in the first dimension
    ph0_RGB = numpy.zeros((3, array.shape[0], filter_map.shape[0]), dtype=numpy.complex128)
    for i in range(3):
        ph0_RGB[i, :, :] = ph0*filter_map[:, i]
    del ph0

    # Shift phase history to avoid having zeropad in middle of filter, to alleviate
    # the purple sidelobe artifact.
    filter_shift = int(numpy.ceil(0.25*filter_map.shape[0]))
    # NOTE(review): these rolls operate on the flattened 2-d band (no axis
    # argument), so samples wrap across row boundaries - presumably intentional,
    # but verify
    ph0_RGB[0, :] = numpy.roll(ph0_RGB[0, :], -filter_shift)
    ph0_RGB[2, :] = numpy.roll(ph0_RGB[2, :], filter_shift)
    # NB: the green band is already centered

    # FFT back to the image domain
    im0_RGB = fft(fftshift(ph0_RGB, axes=2), n=array.shape[1], axis=2)
    del ph0_RGB

    # Replace the intensity with the original image intensity to maintain full
    # resolution (in intensity, but not in color).
    scale_factor = numpy.abs(array)/numpy.abs(im0_RGB).max(axis=0)
    im0_RGB = numpy.abs(im0_RGB)*scale_factor

    # reorient image so that the color segment is in the final dimension
    if dimension == 0:
        im0_RGB = im0_RGB.transpose([2, 1, 0])
    else:
        im0_RGB = im0_RGB.transpose([1, 2, 0])
    if pdir_func == 'R':
        # reverse the color band order
        im0_RGB = im0_RGB[:, :, ::-1]
    return im0_RGB
class CSICalculator(FFTCalculator):
    """
    Class for creating color sub-aperture image from a reader instance.
    It is important to note that full resolution is required for processing along
    the split dimension, so sub-sampling along the split dimension does not decrease
    the amount of data which must be fetched.
    The output of :func:`__getitem__` is a real-valued (rows, cols, 3) RGB array
    (see :func:`_prepare_output`), with the color bands in the final dimension.
    """
    def __init__(
            self,
            reader: Union[str, SICDTypeReader],
            dimension: int = 0,
            index: int = 0,
            block_size: Union[None, int, float] = 50):
        """
        Parameters
        ----------
        reader : str|SICDTypeReader
            Input file path or reader object, which must be of sicd type.
        dimension : int
            The dimension over which to split the sub-aperture.
        index : int
            The sicd index to use.
        block_size : None|int|float
            The approximate processing block size to fetch, given in MB.
        """
        super(CSICalculator, self).__init__(
            reader, dimension=dimension, index=index, block_size=block_size)
    def get_fetch_block_size(
            self,
            start_element: int,
            stop_element: int) -> int:
        """
        Gets the fetch block size for the given full resolution section.
        This assumes that the fetched data will be 24 bytes per pixel, in
        accordance with 3-band complex64 data.
        Parameters
        ----------
        start_element : int
        stop_element : int
        Returns
        -------
        int
        """
        # bands=3 accounts for the three color sub-apertures produced per pixel
        return get_fetch_block_size(start_element, stop_element, self.block_size_in_bytes, bands=3)
    def _full_row_resolution(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]],
            filter_map: Optional[numpy.ndarray] = None) -> numpy.ndarray:
        # fetch full row resolution complex data, then form the color
        # sub-aperture image split along the row (0) dimension
        data = super(CSICalculator, self)._full_row_resolution(row_range, col_range)
        return csi_array(
            data, dimension=0, platform_direction=self._platform_direction,
            filter_map=filter_map)
    def _full_column_resolution(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]],
            filter_map: Optional[numpy.ndarray] = None) -> numpy.ndarray:
        # fetch full column resolution complex data, then form the color
        # sub-aperture image split along the column (1) dimension
        data = super(CSICalculator, self)._full_column_resolution(row_range, col_range)
        return csi_array(
            data, dimension=1, platform_direction=self._platform_direction,
            filter_map=filter_map)
    def _prepare_output(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]]) -> numpy.ndarray:
        # normalize tuple range definitions to slices for sizing purposes
        if isinstance(row_range, Sequence):
            row_range = slice(*row_range)
        if isinstance(col_range, Sequence):
            col_range = slice(*col_range)
        row_count = get_slice_result_size(row_range)
        col_count = get_slice_result_size(col_range)
        # trailing dimension of 3 holds the RGB color bands
        out_size = (row_count, col_count, 3)
        return numpy.zeros(out_size, dtype=numpy.float64)
    def __getitem__(self, item) -> numpy.ndarray:
        """
        Fetches the csi data based on the input slice.
        Parameters
        ----------
        item
        Returns
        -------
        numpy.ndarray
        """
        if self._fill is None:
            raise ValueError('Unable to proceed unless the index and dimension are set.')
        def get_dimension_details(the_range: Union[slice, Tuple[int, int, int]]):
            # Derive, for the processing (split) dimension: the sub-aperture
            # filter map, the fetch block size, and a full-resolution range
            # (step forced to +/-1, since full resolution is required along
            # the split dimension; decimation is applied after CSI formation).
            if isinstance(the_range, Sequence):
                start, stop, step = the_range
            elif isinstance(the_range, slice):
                start = the_range.start
                stop = the_range.stop
                step = the_range.step
            else:
                raise TypeError('Got unexpected range input {}'.format(the_range))
            full_count = abs(int(stop - start))
            the_snip = -1 if step < 0 else 1
            t_filter_map = filter_map_construction(full_count/self.fill)
            t_block_size = self.get_fetch_block_size(start, stop)
            t_full_range = (start, stop, the_snip)
            return t_filter_map, t_block_size, t_full_range
        # parse the slicing to ensure consistent structure
        row_range, col_range, _ = self._parse_slicing(item)
        if self.dimension == 0:
            # we will proceed fetching full row resolution
            filter_map, row_block_size, this_row_range = get_dimension_details(row_range)
            # get our block definitions
            column_blocks, result_blocks = self.extract_blocks(col_range, row_block_size)
            # noinspection PyTypeChecker
            if len(column_blocks) == 1:
                # it's just a single block
                csi = self._full_row_resolution(this_row_range, col_range, filter_map)
                # decimate after the fact to honor the requested row step
                return csi[::abs(row_range.step), :, :]
            else:
                # prepare the output space
                out = self._prepare_output(row_range, col_range)
                # noinspection PyTypeChecker
                for this_column_range, result_range in zip(column_blocks, result_blocks):
                    csi = self._full_row_resolution(this_row_range, this_column_range, filter_map)
                    out[:, result_range[0]:result_range[1], :] = csi[::abs(row_range.step), :, :]
                return out
        else:
            # we will proceed fetching full column resolution
            filter_map, column_block_size, this_col_range = get_dimension_details(col_range)
            # get our block definitions
            row_blocks, result_blocks = self.extract_blocks(row_range, column_block_size)
            # noinspection PyTypeChecker
            if len(row_blocks) == 1:
                # it's just a single block
                csi = self._full_column_resolution(row_range, this_col_range, filter_map)
                # decimate after the fact to honor the requested column step
                return csi[:, ::abs(col_range.step), :]
            else:
                # prepare the output space
                out = self._prepare_output(row_range, col_range)
                # noinspection PyTypeChecker
                for this_row_range, result_range in zip(row_blocks, result_blocks):
                    csi = self._full_column_resolution(this_row_range, this_col_range, filter_map)
                    out[result_range[0]:result_range[1], :, :] = csi[:, ::abs(col_range.step), :]
                return out
| 13,313 | 37.479769 | 108 | py |
sarpy | sarpy-master/sarpy/processing/sicd/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/processing/sicd/fft_base.py | """
Helper classes and methods for Fourier processing schemes.
"""
__classification__ = "UNCLASSIFIED"
__author__ = 'Thomas McCullough'
import logging
from typing import Union
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.processing.ortho_rectify import FullResolutionFetcher
# NB: the below are intended as common imports from other locations
# leave them here, even if unused
import numpy
import scipy
if scipy.__version__ < '1.4':
# noinspection PyUnresolvedReferences
from scipy.fftpack import fft, ifft, fftshift, ifftshift
else:
# noinspection PyUnresolvedReferences
from scipy.fft import fft, ifft, fftshift, ifftshift
logger = logging.getLogger(__name__)
class FFTCalculator(FullResolutionFetcher):
    """
    Base Fourier processing calculator class.
    This is intended for processing schemes where full resolution is required
    along the processing dimension, so sub-sampling along the processing
    dimension does not decrease the amount of data which must be fetched.
    """
    __slots__ = (
        '_platform_direction', '_fill')
    def __init__(
            self,
            reader: Union[str, SICDTypeReader],
            dimension: int = 0,
            index: int = 0,
            block_size: Union[None, int, float] = 50):
        """
        Parameters
        ----------
        reader : str|SICDTypeReader
            Input file path or reader object, which must be of sicd type.
        dimension : int
            The dimension over which to split the sub-aperture.
        index : int
            The sicd index to use.
        block_size : int
            The approximate processing block size to fetch, given in MB. The
            minimum value for use here will be 1.
        """
        self._platform_direction = None  # set with the index setter
        self._fill = None  # set implicitly with _set_fill()
        super(FFTCalculator, self).__init__(reader, dimension=dimension, index=index, block_size=block_size)
    @property
    def dimension(self) -> int:
        """
        int: The dimension along which to perform the color subaperture split.
        """
        return self._dimension
    @dimension.setter
    def dimension(self, value):
        value = int(value)
        if value not in [0, 1]:
            raise ValueError('dimension must be 0 or 1, got {}'.format(value))
        self._dimension = value
        # the fill factor depends on the processing dimension
        self._set_fill()
    @property
    def index(self) -> int:
        """
        int: The index of the reader.
        """
        return self._index
    @index.setter
    def index(self, value):
        super(FFTCalculator, self)._set_index(value)
        if self._sicd.SCPCOA is None or self._sicd.SCPCOA.SideOfTrack is None:
            # BUG FIX: the original message contained a '{}' placeholder with no
            # argument supplied, so the literal braces were logged instead of the
            # index. Use lazy %-style logging arguments to include it.
            logger.warning(
                'The sicd object at index %s has unpopulated SCPCOA.SideOfTrack.\n\t'
                'Defaulting to "R", which may be incorrect.', value)
            self._platform_direction = 'R'
        else:
            self._platform_direction = self._sicd.SCPCOA.SideOfTrack
        self._set_fill()
    @property
    def fill(self) -> float:
        """
        float: The fill factor for the fourier processing.
        """
        return self._fill
    def _set_fill(self):
        # the fill factor is only computable once both the dimension and the
        # index (and hence the sicd structure) have been established
        self._fill = None
        if self._dimension is None:
            return
        if self._index is None:
            return
        if self.dimension == 0:
            try:
                fill = 1.0/(self.sicd.Grid.Row.SS*self.sicd.Grid.Row.ImpRespBW)
            except (ValueError, AttributeError, TypeError):
                # unpopulated/invalid grid parameters - fall back to unity fill
                fill = 1.0
        else:
            try:
                fill = 1.0/(self.sicd.Grid.Col.SS*self.sicd.Grid.Col.ImpRespBW)
            except (ValueError, AttributeError, TypeError):
                fill = 1.0
        # the fill factor is never less than 1
        self._fill = max(1.0, float(fill))
    def __getitem__(self, item) -> numpy.ndarray:
        """
        Fetches the processed data based on the input slice.
        Parameters
        ----------
        item
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
def _validate_fft_input(array: numpy.ndarray) -> None:
"""
Validate the fft input.
Parameters
----------
array : numpy.ndarray
Returns
-------
None
"""
if not isinstance(array, numpy.ndarray):
raise TypeError('array must be a numpy array')
if not numpy.iscomplexobj(array):
raise ValueError('array must have a complex data type')
if array.ndim != 2:
raise ValueError('array must be a two-dimensional array. Got shape {}'.format(array.shape))
def _determine_direction(
        sicd: SICDType, dimension: int) -> int:
    """
    Determine the default sign for the fft.
    Parameters
    ----------
    sicd : SICDType
    dimension : int
        Must be one of 0, 1.
    Returns
    -------
    int
        The populated Grid sign for the given dimension, defaulting to -1
        when it is not populated.
    """
    if dimension not in (0, 1):
        raise ValueError('dimension must be one of 0 or 1.')
    try:
        grid_direction = sicd.Grid.Row if dimension == 0 else sicd.Grid.Col
        sign_value = grid_direction.Sgn
    except AttributeError:
        # the sicd structure is incomplete here - fall back to the default
        sign_value = None
    return -1 if sign_value is None else sign_value
def fft_sicd(array: numpy.ndarray, dimension: int, sicd: SICDType) -> numpy.ndarray:
    """
    Apply the forward one-dimensional forward fft to data associated with the
    given sicd along the given dimension/axis, in accordance with the sign
    populated in the SICD structure (default is -1).
    Parameters
    ----------
    array : numpy.ndarray
        The data array, which must be two-dimensional and complex.
    dimension : int
        Must be one of 0, 1.
    sicd : SICDType
        The associated SICD structure.
    Returns
    -------
    numpy.ndarray
    """
    # a negative sign convention maps the forward operation to fft,
    # a positive sign convention maps it to ifft
    if _determine_direction(sicd, dimension) < 0:
        return fft(array, axis=dimension)
    return ifft(array, axis=dimension)
def ifft_sicd(array: numpy.ndarray, dimension: int, sicd: SICDType) -> numpy.ndarray:
    """
    Apply the inverse one-dimensional fft to data associated with the given sicd
    along the given dimension/axis.
    Parameters
    ----------
    array : numpy.ndarray
        The data array, which must be two-dimensional and complex.
    dimension : int
        Must be one of 0, 1.
    sicd : SICDType
        The associated SICD structure.
    Returns
    -------
    numpy.ndarray
    """
    # the inverse of fft_sicd - the role of fft/ifft flips with the sign
    if _determine_direction(sicd, dimension) < 0:
        return ifft(array, axis=dimension)
    return fft(array, axis=dimension)
def fft2_sicd(array: numpy.ndarray, sicd: SICDType) -> numpy.ndarray:
    """
    Apply the forward two-dimensional fft (i.e. both axes) to data associated with
    the given sicd.
    Parameters
    ----------
    array : numpy.ndarray
        The data array, which must be two-dimensional and complex.
    sicd : SICDType
        The associated SICD structure.
    Returns
    -------
    numpy.ndarray
    """
    # apply the one-dimensional transform along each axis in turn
    partial_transform = fft_sicd(array, 0, sicd)
    return fft_sicd(partial_transform, 1, sicd)
def ifft2_sicd(array: numpy.ndarray, sicd: SICDType) -> numpy.ndarray:
    """
    Apply the inverse two-dimensional fft (i.e. both axes) to data associated with
    the given sicd.
    Parameters
    ----------
    array : numpy.ndarray
        The data array, which must be two-dimensional and complex.
    sicd : SICDType
        The associated SICD structure.
    Returns
    -------
    numpy.ndarray
    """
    # apply the one-dimensional inverse transform along each axis in turn
    partial_transform = ifft_sicd(array, 0, sicd)
    return ifft_sicd(partial_transform, 1, sicd)
sarpy | sarpy-master/sarpy/processing/ortho_rectify/base.py | """
Common ortho-rectification elements
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
from typing import Union, Tuple, List, Optional, Sequence
import numpy
from sarpy.io.complex.converter import open_complex
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.general.slice_parsing import verify_subscript
from sarpy.io.complex.utils import get_fetch_block_size, extract_blocks
from sarpy.geometry.geocoords import ecf_to_geodetic
from sarpy.visualization.remap import RemapFunction
from .ortho_methods import OrthorectificationHelper
logger = logging.getLogger(__name__)
class FullResolutionFetcher(object):
    """
    This is a base class for provided a simple API for processing schemes where
    full resolution is required along the processing dimension, so sub-sampling
    along the processing dimension does not decrease the amount of data which
    must be fetched.
    """
    __slots__ = (
        '_reader', '_index', '_sicd', '_dimension', '_data_size', '_block_size')
    def __init__(
            self,
            reader: Union[str, SICDTypeReader],
            dimension: int = 0,
            index: int = 0,
            block_size: Union[None, int, float] = 10):
        """
        Parameters
        ----------
        reader : str|SICDTypeReader
            Input file path or reader object, which must be of sicd type.
        dimension : int
            The dimension over which to split the sub-aperture.
        index : int
            The sicd index to use.
        block_size : None|int|float
            The approximate processing block size to fetch, given in MB. The
            minimum value for use here will be 0.25. `None` represents processing
            as a single block.
        """
        self._index = None  # set explicitly
        self._sicd = None  # set with index setter
        self._dimension = None  # set explicitly
        self._data_size = None  # set with index setter
        self._block_size = None  # set explicitly
        # validate the reader
        if isinstance(reader, str):
            reader = open_complex(reader)
        if not isinstance(reader, SICDTypeReader):
            raise TypeError('reader is required to be a path name for a sicd-type image, '
                            'or an instance of a reader object.')
        self._reader = reader
        # set the other properties
        self.dimension = dimension
        self.index = index
        self.block_size = block_size
    @property
    def reader(self) -> SICDTypeReader:
        """
        SICDTypeReader: The reader instance.
        """
        return self._reader
    @property
    def dimension(self) -> int:
        """
        int: The dimension along which to perform the color subaperture split.
        """
        return self._dimension
    @dimension.setter
    def dimension(self, value):
        value = int(value)
        if value not in [0, 1]:
            raise ValueError('dimension must be 0 or 1, got {}'.format(value))
        self._dimension = value
    @property
    def data_size(self) -> Tuple[int, ...]:
        """
        Tuple[int, ...]: The data size for the reader at the given index.
        """
        return self._data_size
    @property
    def index(self) -> int:
        """
        int: The index of the reader.
        """
        return self._index
    @index.setter
    def index(self, value):
        self._set_index(value)
    def _set_index(self, value):
        # separated from the setter so subclasses can extend index handling
        value = int(value)
        if value < 0:
            raise ValueError('The index must be a non-negative integer, got {}'.format(value))
        sicds = self.reader.get_sicds_as_tuple()
        if value >= len(sicds):
            raise ValueError('The index must be less than the sicd count.')
        self._index = value
        self._sicd = sicds[value]
        self._data_size = self.reader.get_data_size_as_tuple()[value]
    @property
    def block_size(self) -> Optional[float]:
        """
        None|float: The approximate processing block size in MB, where `None`
        represents processing in a single block.
        """
        return self._block_size
    @block_size.setter
    def block_size(self, value):
        if value is None:
            self._block_size = None
        else:
            value = float(value)
            # silently clamp to the documented minimum of 0.25 MB
            if value < 0.25:
                value = 0.25
            self._block_size = value
    @property
    def block_size_in_bytes(self) -> Optional[int]:
        """
        None|int: The approximate processing block size in bytes.
        """
        return None if self._block_size is None else int(self._block_size*(2**20))
    @property
    def sicd(self) -> SICDType:
        """
        SICDType: The sicd structure.
        """
        return self._sicd
    def _parse_slicing(
            self,
            item: Union[None, int, slice, Tuple[Union[int, slice], ...]]) -> Tuple[slice, slice, Optional[int]]:
        # normalize any subscript into (row slice, column slice, optional third index)
        if isinstance(item, tuple) and len(item) > 2:
            if len(item) > 3:
                raise ValueError('Got unexpected subscript {}'.format(item))
            if len(item) == 3:
                if not isinstance(item[2], int):
                    raise ValueError('Got unexpected subscript {}'.format(item))
                return verify_subscript(item[:2], self._data_size) + (item[2], )
        return verify_subscript(item, self._data_size) + (None, )
    def get_fetch_block_size(self, start_element: int, stop_element: int) -> int:
        """
        Gets the fetch block size for the given full resolution section.
        This assumes that the fetched data will be 8 bytes per pixel, in
        accordance with single band complex64 data.
        Parameters
        ----------
        start_element : int
        stop_element : int
        Returns
        -------
        int
        """
        return get_fetch_block_size(start_element, stop_element, self.block_size_in_bytes, bands=1)
    @staticmethod
    def extract_blocks(
            the_range: Union[slice, Tuple[int, int, int]],
            index_block_size: Union[None, int, float]) -> Tuple[List[Tuple[int, int, int]], List[Tuple[int, int]]]:
        """
        Convert the single range definition into a series of range definitions in
        keeping with fetching of the appropriate block sizes.
        Parameters
        ----------
        the_range : slice|Tuple[int, int, int]
            The input (off processing axis) range.
        index_block_size : None|int|float
            The size of blocks (number of indices).
        Returns
        -------
        range_definitions: List[Tuple[int, int, int]]
            The sequence of range definitions `(start index, stop index, step)`
            relative to the overall image.
        limit_indices: List[Tuple[int, int]]
            The sequence of start/stop indices for positioning of the given
            range relative to the original range.
        """
        if isinstance(the_range, slice):
            if the_range.stop is None:
                # an open stop is only sensible when traversing backwards to index 0
                if the_range.step > 0:
                    raise ValueError('Got unexpected slice {}'.format(the_range))
                use_range = (the_range.start, -1, the_range.step)
            else:
                use_range = (the_range.start, the_range.stop, the_range.step)
        else:
            use_range = the_range
        # noinspection PyTypeChecker
        return extract_blocks(use_range, index_block_size)
    def _full_row_resolution(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]]) -> numpy.ndarray:
        """
        Perform the full row resolution data, with any appropriate calculations.
        Parameters
        ----------
        row_range : slice|Tuple[int, int, int]
        col_range : slice|Tuple[int, int, int]
        Returns
        -------
        numpy.ndarray
        """
        if isinstance(row_range, Sequence):
            row_range = slice(*row_range)
        if isinstance(col_range, Sequence):
            col_range = slice(*col_range)
        # fetch the data and perform the csi calculation
        if row_range.step not in [1, -1]:
            raise ValueError('The step for row_range must be +/- 1, for full row resolution data.')
        data = self.reader[(row_range, col_range, self.index)]
        if data.ndim < 2:
            data = numpy.reshape(data, (-1, 1))
        # handle nonsense data with zeros
        data[~numpy.isfinite(data)] = 0
        return data
    def _full_column_resolution(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]]) -> numpy.ndarray:
        """
        Perform the full column resolution data, with any appropriate calculations.
        Parameters
        ----------
        row_range : Tuple[int, int, int]
        col_range : Tuple[int, int, int]
        Returns
        -------
        numpy.ndarray
        """
        if isinstance(row_range, Sequence):
            row_range = slice(*row_range)
        if isinstance(col_range, Sequence):
            col_range = slice(*col_range)
        # fetch the data and perform the csi calculation
        if col_range.step not in [1, -1]:
            raise ValueError('The step for col_range must be +/- 1, for full col resolution data.')
        data = self.reader[(row_range, col_range, self.index)]
        if data.ndim < 2:
            data = numpy.reshape(data, (1, -1))
        # handle nonsense data with zeros
        data[~numpy.isfinite(data)] = 0
        return data
    def _prepare_output(
            self,
            row_range: Union[slice, Tuple[int, int, int]],
            col_range: Union[slice, Tuple[int, int, int]]) -> numpy.ndarray:
        """
        Prepare the output workspace for :func:`__getitem__`.
        Parameters
        ----------
        row_range : slice|Tuple[int, int, int]
        col_range : slice|Tuple[int, int, int]
        Returns
        -------
        numpy.ndarray
        """
        # BUG FIX: use integer ceiling division for the element counts. Simple
        # truncation (int((stop - start)/step)) undercounts by one whenever the
        # range length is not an even multiple of the step - e.g. (0, 10, 3)
        # yields indices 0, 3, 6, 9 (4 elements), but int(10/3) gives 3.
        row_count = int(-((row_range[0] - row_range[1]) // row_range[2]))
        col_count = int(-((col_range[0] - col_range[1]) // col_range[2]))
        out_size = (row_count, col_count)
        return numpy.zeros(out_size, dtype=numpy.complex64)
    def __getitem__(self, subscript) -> numpy.ndarray:
        """
        Fetches the processed data based on the input slice.
        Parameters
        ----------
        subscript
        Returns
        -------
        numpy.ndarray
        """
        subscript = verify_subscript(subscript, self.data_size)
        return self.reader.read(*subscript, index=self.index)
class OrthorectificationIterator(object):
    """
    This provides a generator for an Orthorectification process on a given
    reader/index/(pixel) bounds.
    Iterating yields `(ortho-rectified data block, (start_row, start_col))`
    pairs - see :func:`__next__`.
    """
    __slots__ = (
        '_calculator', '_ortho_helper', '_pixel_bounds', '_ortho_bounds',
        '_this_index', '_iteration_blocks', '_remap_function')
    def __init__(
            self,
            ortho_helper: OrthorectificationHelper,
            calculator: Optional[FullResolutionFetcher] = None,
            bounds: Union[None, numpy.ndarray, tuple, list] = None,
            remap_function: Optional[RemapFunction] = None,
            recalc_remap_globals: bool = False):
        """
        Parameters
        ----------
        ortho_helper : OrthorectificationHelper
            The ortho-rectification helper.
        calculator : None|FullResolutionFetcher
            The FullResolutionFetcher instance. If not provided, then this will
            default to a base FullResolutionFetcher instance - which is only
            useful for a basic detected image.
        bounds : None|numpy.ndarray|list|tuple
            The pixel bounds of the form `(min row, max row, min col, max col)`.
            This will default to the full image.
        remap_function : None|RemapFunction
            The remap function to apply, if desired.
        recalc_remap_globals : bool
            Only applies if a remap function is provided, should we recalculate
            any required global parameters? This will automatically happen if
            they are not already set.
        """
        self._this_index = None
        self._iteration_blocks = None
        self._remap_function = None
        # validate ortho_helper
        if not isinstance(ortho_helper, OrthorectificationHelper):
            raise TypeError(
                'ortho_helper must be an instance of OrthorectificationHelper, got '
                'type {}'.format(type(ortho_helper)))
        self._ortho_helper = ortho_helper
        # validate calculator
        if calculator is None:
            calculator = FullResolutionFetcher(ortho_helper.reader, index=ortho_helper.index, dimension=0)
        if not isinstance(calculator, FullResolutionFetcher):
            raise TypeError(
                'calculator must be an instance of FullResolutionFetcher, got '
                'type {}'.format(type(calculator)))
        self._calculator = calculator
        # the helper and calculator must reference the same file and image index
        if ortho_helper.reader.file_name is not None and calculator.reader.file_name is not None and \
                os.path.abspath(ortho_helper.reader.file_name) != os.path.abspath(calculator.reader.file_name):
            raise ValueError(
                'ortho_helper has reader for file {}, while calculator has reader '
                'for file {}'.format(ortho_helper.reader.file_name, calculator.reader.file_name))
        if ortho_helper.index != calculator.index:
            raise ValueError(
                'ortho_helper is using index {}, while calculator is using '
                'index {}'.format(ortho_helper.index, calculator.index))
        # validate the bounds
        if bounds is not None:
            pixel_bounds, pixel_rectangle = ortho_helper.bounds_to_rectangle(bounds)
            # get the corresponding ortho bounds
            ortho_bounds = ortho_helper.get_orthorectification_bounds_from_pixel_object(pixel_rectangle)
        else:
            ortho_bounds = ortho_helper.get_full_ortho_bounds()
        ortho_bounds, nominal_pixel_bounds = ortho_helper.extract_pixel_bounds(ortho_bounds)
        # extract the values - ensure that things are within proper image bounds
        pixel_bounds = ortho_helper.get_real_pixel_bounds(nominal_pixel_bounds)
        # validate remap function
        if remap_function is None or isinstance(remap_function, RemapFunction):
            self._remap_function = remap_function
        else:
            raise TypeError(
                'remap_function is expected to be an instance of RemapFunction, '
                'got type `{}`'.format(type(remap_function)))
        self._pixel_bounds = pixel_bounds
        self._ortho_bounds = ortho_bounds
        self._prepare_state(recalc_remap_globals=recalc_remap_globals)
    @property
    def ortho_helper(self) -> OrthorectificationHelper:
        """
        OrthorectificationHelper: The ortho-rectification helper.
        """
        return self._ortho_helper
    @property
    def calculator(self) -> FullResolutionFetcher:
        """
        FullResolutionFetcher : The calculator instance.
        """
        return self._calculator
    @property
    def sicd(self) -> SICDType:
        """
        SICDType: The sicd structure.
        """
        return self.calculator.sicd
    @property
    def pixel_bounds(self) -> numpy.ndarray:
        """
        numpy.ndarray : Of the form `(row min, row max, col min, col max)`.
        """
        return self._pixel_bounds
    @property
    def ortho_bounds(self) -> numpy.ndarray:
        """
        numpy.ndarray : Of the form `(row min, row max, col min, col max)`. Note that
        these are "unnormalized" orthorectified pixel coordinates.
        """
        return self._ortho_bounds
    @property
    def ortho_data_size(self) -> Tuple[int, int]:
        """
        Tuple[int, int] : The size of the overall ortho-rectified output.
        """
        return (
            int(self.ortho_bounds[1] - self.ortho_bounds[0]),
            int(self.ortho_bounds[3] - self.ortho_bounds[2]))
    @property
    def remap_function(self) -> Optional[RemapFunction]:
        """
        None|RemapFunction: The remap function to be applied.
        """
        return self._remap_function
    def get_ecf_image_corners(self) -> Optional[numpy.ndarray]:
        """
        The corner points of the overall ortho-rectified output in ECF
        coordinates. The ordering of these points follows the SICD convention.
        Returns
        -------
        numpy.ndarray
        """
        if self.ortho_bounds is None:
            return None
        _, ortho_pixel_corners = self._ortho_helper.bounds_to_rectangle(self.ortho_bounds)
        return self._ortho_helper.proj_helper.ortho_to_ecf(ortho_pixel_corners)
    def get_llh_image_corners(self) -> Optional[numpy.ndarray]:
        """
        The corner points of the overall ortho-rectified output in Lat/Lon/HAE
        coordinates. The ordering of these points follows the SICD convention.
        Returns
        -------
        None|numpy.ndarray
        """
        ecf_corners = self.get_ecf_image_corners()
        if ecf_corners is None:
            return None
        else:
            return ecf_to_geodetic(ecf_corners)
    def _prepare_state(self, recalc_remap_globals: bool = False) -> None:
        """
        Prepare the iteration state.
        Parameters
        ----------
        recalc_remap_globals : bool
        Returns
        -------
        None
        """
        # blocks are carved along the axis opposite the calculator's processing
        # dimension, sized from the full-resolution extent along that dimension
        if self.calculator.dimension == 0:
            column_block_size = self.calculator.get_fetch_block_size(self.ortho_bounds[0], self.ortho_bounds[1])
            self._iteration_blocks, _ = self.calculator.extract_blocks(
                (self.ortho_bounds[2], self.ortho_bounds[3], 1), column_block_size)
        else:
            row_block_size = self.calculator.get_fetch_block_size(self.ortho_bounds[2], self.ortho_bounds[3])
            self._iteration_blocks, _ = self.calculator.extract_blocks(
                (self.ortho_bounds[0], self.ortho_bounds[1], 1), row_block_size)
        if self.remap_function is not None and \
                (recalc_remap_globals or not self.remap_function.are_global_parameters_set):
            self.remap_function.calculate_global_parameters_from_reader(
                self.ortho_helper.reader, index=self.ortho_helper.index, pixel_bounds=self.pixel_bounds)
    @staticmethod
    def _get_ortho_helper(
            pixel_bounds: Union[Tuple[int, int, int, int], numpy.ndarray],
            this_data: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]:
        """
        Get helper data for ortho-rectification.
        Parameters
        ----------
        pixel_bounds : Tuple[int, int, int, int]|numpy.ndarray
        this_data : numpy.ndarray
        Returns
        -------
        row_array: numpy.ndarray
        col_array: numpy.ndarray
        """
        # NOTE(review): a data extent one short of the bounds is tolerated in
        # each dimension - presumably to accommodate edge effects from upstream
        # processing; confirm against the calculator implementations.
        rows_temp = pixel_bounds[1] - pixel_bounds[0]
        if this_data.shape[0] == rows_temp:
            row_array = numpy.arange(pixel_bounds[0], pixel_bounds[1])
        elif this_data.shape[0] == (rows_temp - 1):
            row_array = numpy.arange(pixel_bounds[0], pixel_bounds[1] - 1)
        else:
            raise ValueError('Unhandled data size mismatch {} and {}'.format(this_data.shape, rows_temp))
        cols_temp = pixel_bounds[3] - pixel_bounds[2]
        if this_data.shape[1] == cols_temp:
            col_array = numpy.arange(pixel_bounds[2], pixel_bounds[3])
        elif this_data.shape[1] == (cols_temp - 1):
            col_array = numpy.arange(pixel_bounds[2], pixel_bounds[3] - 1)
        else:
            raise ValueError('Unhandled data size mismatch {} and {}'.format(this_data.shape, cols_temp))
        return row_array, col_array
    def _get_orthorectified_version(
            self,
            this_ortho_bounds: numpy.ndarray,
            pixel_bounds: Union[Tuple[int, int, int, int], numpy.ndarray],
            this_data: numpy.ndarray) -> numpy.ndarray:
        """
        Get the orthorectified version from the raw values and pixel information.
        Parameters
        ----------
        this_ortho_bounds
        pixel_bounds
        this_data
        Returns
        -------
        numpy.ndarray
        """
        row_array, col_array = self._get_ortho_helper(pixel_bounds, this_data)
        ortho_data = self._ortho_helper.get_orthorectified_from_array(
            this_ortho_bounds, row_array, col_array, this_data)
        # apply the remap (if any) after ortho-rectification
        if self.remap_function is None:
            return ortho_data
        else:
            return self.remap_function(ortho_data)
    def _get_state_parameters(
            self,
            pad: int = 10) -> Tuple[numpy.ndarray, numpy.ndarray]:
        """
        Gets the pixel information associated with the current state.
        Parameters
        ----------
        pad : int
            Pad the pixel bounds, to accommodate for any edge cases.
        Returns
        -------
        ortho_bounds: numpy.ndarray
        pixel_bounds: numpy.ndarray
        """
        if self._calculator.dimension == 0:
            this_column_range = self._iteration_blocks[self._this_index]
            # determine the corresponding pixel ranges to encompass these values
            this_ortho_bounds, this_pixel_bounds = self._ortho_helper.extract_pixel_bounds(
                (self.ortho_bounds[0], self.ortho_bounds[1], this_column_range[0], this_column_range[1]))
        else:
            this_row_range = self._iteration_blocks[self._this_index]
            this_ortho_bounds, this_pixel_bounds = self._ortho_helper.extract_pixel_bounds(
                (this_row_range[0], this_row_range[1], self.ortho_bounds[2], self.ortho_bounds[3]))
        # widen mins downward and maxes upward by the pad amount
        this_pixel_bounds[0::2] -= pad
        this_pixel_bounds[1::2] += pad
        return this_ortho_bounds, this_pixel_bounds
    def __iter__(self):
        return self
    def __next__(self) -> Tuple[numpy.ndarray, Tuple[int, int]]:
        """
        Get the next iteration of orthorectified data.
        Returns
        -------
        data: numpy.ndarray
        indices: Tuple[int, int]
            The (normalized) indices `(start_row, start_col)` for this section of
            data, relative to overall output shape.
        """
        # NB: this is the Python 3 pattern for iteration
        if self._this_index is None:
            self._this_index = 0
        else:
            self._this_index += 1
        # at this point, _this_index indicates which entry to return
        if self._this_index >= len(self._iteration_blocks):
            self._this_index = None  # reset the iteration scheme
            raise StopIteration()
        this_ortho_bounds, this_pixel_bounds = self._get_state_parameters()
        # accommodate for real pixel limits
        this_pixel_bounds = self._ortho_helper.get_real_pixel_bounds(this_pixel_bounds)
        # extract the csi data and ortho-rectify
        logger.info(
            'Fetching orthorectified coordinate block ({}:{}, {}:{}) of ({}, {})'.format(
                this_ortho_bounds[0] - self.ortho_bounds[0], this_ortho_bounds[1] - self.ortho_bounds[0],
                this_ortho_bounds[2] - self.ortho_bounds[2], this_ortho_bounds[3] - self.ortho_bounds[2],
                self.ortho_bounds[1] - self.ortho_bounds[0], self.ortho_bounds[3] - self.ortho_bounds[2]))
        ortho_data = self._get_orthorectified_version(
            this_ortho_bounds, this_pixel_bounds,
            self._calculator[this_pixel_bounds[0]:this_pixel_bounds[1], this_pixel_bounds[2]:this_pixel_bounds[3]])
        # determine the relative image size
        start_indices = (this_ortho_bounds[0] - self.ortho_bounds[0],
                         this_ortho_bounds[2] - self.ortho_bounds[2])
        return ortho_data, start_indices
    def next(self) -> Tuple[numpy.ndarray, Tuple[int, int]]:
        """
        Get the next iteration of ortho-rectified data.
        Returns
        -------
        data: numpy.ndarray
        indices: Tuple[int, int]
            The (normalized) indices `(start_row, start_col)` for this section of
            data, relative to overall output shape.
        """
        # NB: this is the Python 2 pattern for iteration
        return self.__next__()
sarpy | sarpy-master/sarpy/processing/ortho_rectify/__init__.py |
__classification__ = 'UNCLASSIFIED'
from .base import FullResolutionFetcher, OrthorectificationIterator
from .ortho_methods import OrthorectificationHelper, NearestNeighborMethod, BivariateSplineMethod
from .projection_helper import ProjectionHelper, PGProjection, PGRatPolyProjection
| 287 | 40.142857 | 97 | py |
sarpy | sarpy-master/sarpy/processing/ortho_rectify/projection_helper.py | """
Unified methods of projection between sicd pixel coordinates,
some ortho-rectified pixel grid coordinates, and geophysical coordinates
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import numpy
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.geometry.geocoords import geodetic_to_ecf, ecf_to_geodetic, wgs_84_norm
from sarpy.processing.rational_polynomial import SarpyRatPolyError, \
get_rational_poly_2d, get_rational_poly_3d, CombinedRationalPolynomial
logger = logging.getLogger(__name__)
_PIXEL_METHODOLOGY = ('MAX', 'MIN', 'MEAN', 'GEOM_MEAN')
class ProjectionHelper(object):
    """
    Abstract helper class which defines the projection interface for
    ortho-rectification usage for a sicd type object.

    Concrete subclasses implement the coordinate transforms (`ecf_to_ortho`,
    `pixel_to_ortho`, `ortho_to_ecf`, and friends); this base class manages
    the sicd reference and the ortho-rectified pixel spacing defaults.
    """
    __slots__ = ('_sicd', '_row_spacing', '_col_spacing', '_default_pixel_method')
    def __init__(self, sicd, row_spacing=None, col_spacing=None, default_pixel_method='GEOM_MEAN'):
        r"""
        Parameters
        ----------
        sicd : SICDType
            The sicd object
        row_spacing : None|float
            The row pixel spacing. If not provided, this will default according
            to `default_pixel_method`.
        col_spacing : None|float
            The column pixel spacing. If not provided, this will default according
            to `default_pixel_method`.
        default_pixel_method : str
            Must be one of ('MAX', 'MIN', 'MEAN', 'GEOM_MEAN'). This determines
            the default behavior for row_spacing/col_spacing. The default value for
            row/column spacing will be the implied function applied to the range
            and azimuth ground resolution. Note that geometric mean is defined as
            :math:`\sqrt(x*x + y*y)`
        """
        self._row_spacing = None
        self._col_spacing = None
        # validate the default pixel methodology before anything depends on it
        default_pixel_method = default_pixel_method.upper()
        if default_pixel_method not in _PIXEL_METHODOLOGY:
            raise ValueError(
                'default_pixel_method got invalid value {}. Must be one '
                'of {}'.format(default_pixel_method, _PIXEL_METHODOLOGY))
        self._default_pixel_method = default_pixel_method
        if not isinstance(sicd, SICDType):
            raise TypeError('sicd must be a SICDType instance. Got type {}'.format(type(sicd)))
        if not sicd.can_project_coordinates():
            raise ValueError('Ortho-rectification requires the SICD ability to project coordinates.')
        # ensure the sicd has its COA projection defined (no-op if already defined)
        sicd.define_coa_projection(override=False)
        self._sicd = sicd
        # NB: the spacing setters consult self._sicd for default values, so the
        # sicd must be assigned first
        self.row_spacing = row_spacing
        self.col_spacing = col_spacing
    @property
    def sicd(self):
        """
        SICDType: The sicd structure.
        """
        return self._sicd
    @property
    def row_spacing(self):
        """
        float: The row pixel spacing
        """
        return self._row_spacing
    @row_spacing.setter
    def row_spacing(self, value):
        """
        Set the row pixel spacing value. Setting to None will result in a
        default value derived from the SICD structure being used.
        Parameters
        ----------
        value : None|float
        Returns
        -------
        None
        """
        if value is None:
            # prefer the SICD-defined output plane spacing when available
            if self.sicd.RadarCollection.Area is None:
                self._row_spacing = self._get_sicd_ground_pixel()
            else:
                self._row_spacing = self.sicd.RadarCollection.Area.Plane.XDir.LineSpacing
        else:
            value = float(value)
            if value <= 0:
                raise ValueError('row pixel spacing must be positive.')
            self._row_spacing = float(value)
    @property
    def col_spacing(self):
        """
        float: The column pixel spacing
        """
        return self._col_spacing
    @col_spacing.setter
    def col_spacing(self, value):
        """
        Set the col pixel spacing value. Setting to None will result in a
        default value derived from the SICD structure being used.
        Parameters
        ----------
        value : None|float
        Returns
        -------
        None
        """
        if value is None:
            # prefer the SICD-defined output plane spacing when available
            if self.sicd.RadarCollection.Area is None:
                self._col_spacing = self._get_sicd_ground_pixel()
            else:
                self._col_spacing = self.sicd.RadarCollection.Area.Plane.YDir.SampleSpacing
        else:
            value = float(value)
            if value <= 0:
                raise ValueError('column pixel spacing must be positive.')
            self._col_spacing = float(value)
    def _get_sicd_ground_pixel(self):
        """
        Gets the SICD ground pixel size, combining the ground-plane row and
        column resolutions per `_default_pixel_method`.
        Returns
        -------
        float
        """
        ground_row_ss, ground_col_ss = self.sicd.get_ground_resolution()
        if self._default_pixel_method == 'MIN':
            return min(ground_row_ss, ground_col_ss)
        elif self._default_pixel_method == 'MAX':
            return max(ground_row_ss, ground_col_ss)
        elif self._default_pixel_method == 'MEAN':
            return 0.5*(ground_row_ss + ground_col_ss)
        elif self._default_pixel_method == 'GEOM_MEAN':
            # NB: per the constructor docstring, 'GEOM_MEAN' here is the
            # root-sum-square of the two resolutions
            return float(numpy.sqrt(ground_row_ss*ground_row_ss + ground_col_ss*ground_col_ss))
        else:
            raise ValueError('Got unhandled default_pixel_method {}'.format(self._default_pixel_method))
    @staticmethod
    def _reshape(array, final_dimension):
        """
        Reshape the input so that the output is two-dimensional with final
        dimension given by `final_dimension`.
        Parameters
        ----------
        array : numpy.ndarray|list|tuple
        final_dimension : int
        Returns
        -------
        (numpy.ndarray, tuple)
            The reshaped data array and original shape.
        """
        if not isinstance(array, numpy.ndarray):
            array = numpy.array(array, dtype=numpy.float64)
        if array.ndim < 1 or array.shape[-1] != final_dimension:
            raise ValueError(
                'ortho_coords must be at least one dimensional with final dimension '
                'of size {}.'.format(final_dimension))
        # preserve the original shape so callers can restore it after processing
        o_shape = array.shape
        if array.ndim != 2:
            array = numpy.reshape(array, (-1, final_dimension))
        return array, o_shape
    def ecf_to_ortho(self, coords):
        """
        Gets the `(ortho_row, ortho_column)` coordinates in the ortho-rectified
        system for the provided physical coordinates in ECF `(X, Y, Z)` coordinates.
        Parameters
        ----------
        coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def ecf_to_pixel(self, coords):
        """
        Gets the `(pixel_row, pixel_column)` coordinates for the provided physical
        coordinates in ECF `(X, Y, Z)` coordinates.
        Parameters
        ----------
        coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def ll_to_ortho(self, ll_coords):
        """
        Gets the `(ortho_row, ortho_column)` coordinates in the ortho-rectified
        system for the provided physical coordinates in `(Lat, Lon)` coordinates.
        Note that there is inherent ambiguity when handling the missing elevation,
        and the effect is likely methodology dependent.
        Parameters
        ----------
        ll_coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def llh_to_ortho(self, llh_coords):
        """
        Gets the `(ortho_row, ortho_column)` coordinates in the ortho-rectified
        system for the provided physical coordinates in `(Lat, Lon, HAE)`
        coordinates.
        Parameters
        ----------
        llh_coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def pixel_to_ortho(self, pixel_coords):
        """
        Gets the ortho-rectified indices for the point(s) in pixel coordinates.
        Parameters
        ----------
        pixel_coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def pixel_to_ecf(self, pixel_coords):
        """
        Gets the ECF coordinates for the point(s) in pixel coordinates.
        Parameters
        ----------
        pixel_coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def ortho_to_ecf(self, ortho_coords):
        """
        Get the ecf coordinates for the point(s) in ortho-rectified coordinates.
        Parameters
        ----------
        ortho_coords : numpy.ndarray
            Point(s) in the ortho-recitified coordinate system, of the form
            `(ortho_row, ortho_column)`.
        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError
    def ortho_to_llh(self, ortho_coords):
        """
        Get the lat/lon/hae coordinates for the point(s) in ortho-rectified coordinates.
        Parameters
        ----------
        ortho_coords : numpy.ndarray
            Point(s) in the ortho-recitified coordinate system, of the form
            `(ortho_row, ortho_column)`.
        Returns
        -------
        numpy.ndarray
        """
        # composition of the subclass-provided ortho -> ECF transform with the
        # standard ECF -> geodetic conversion
        ecf = self.ortho_to_ecf(ortho_coords)
        return ecf_to_geodetic(ecf)
    def ortho_to_pixel(self, ortho_coords):
        """
        Get the pixel indices for the point(s) in ortho-rectified coordinates.
        Parameters
        ----------
        ortho_coords : numpy.ndarray
            Point(s) in the ortho-recitified coordinate system, of the form
            `(ortho_row, ortho_column)`.
        Returns
        -------
        numpy.ndarray
            The array of indices, of the same shape as `new_coords`, which indicate
            `(row, column)` pixel (fractional) indices.
        """
        raise NotImplementedError
    def get_pixel_array_bounds(self, coords):
        """
        Extract integer bounds of the input array, expected to have final dimension
        of size 2.
        Parameters
        ----------
        coords : numpy.ndarray
        Returns
        -------
        numpy.ndarray
            Of the form `(min_row, max_row, min_column, max_column)`.
        """
        coords, o_shape = self._reshape(coords, 2)
        # NB: ceil of the min and floor of the max yields the integer bounds
        # contained *within* the coordinate extent; NaN entries are ignored
        return numpy.array(
            (numpy.ceil(numpy.nanmin(coords[:, 0], axis=0)),
             numpy.floor(numpy.nanmax(coords[:, 0], axis=0)),
             numpy.ceil(numpy.nanmin(coords[:, 1], axis=0)),
             numpy.floor(numpy.nanmax(coords[:, 1], axis=0))), dtype=numpy.int64)
class PGProjection(ProjectionHelper):
    """
    Class which helps perform the Planar Grid (i.e. Ground Plane) ortho-rectification
    for a sicd-type object using the SICD projection model directly.
    """
    __slots__ = (
        '_reference_point', '_reference_pixels', '_row_vector', '_col_vector', '_normal_vector', '_reference_hae')
    def __init__(self, sicd, reference_point=None, reference_pixels=None, normal_vector=None, row_vector=None,
                 col_vector=None, row_spacing=None, col_spacing=None,
                 default_pixel_method='GEOM_MEAN'):
        r"""
        Parameters
        ----------
        sicd : SICDType
            The sicd object
        reference_point : None|numpy.ndarray
            The reference point (origin) of the planar grid. If None, a default
            derived from the SICD will be used.
        reference_pixels : None|numpy.ndarray
            The projected pixel
        normal_vector : None|numpy.ndarray
            The unit normal vector of the plane.
        row_vector : None|numpy.ndarray
            The vector defining increasing row direction. If None, a default
            derived from the SICD will be used.
        col_vector : None|numpy.ndarray
            The vector defining increasing column direction. If None, a default
            derived from the SICD will be used.
        row_spacing : None|float
            The row pixel spacing.
        col_spacing : None|float
            The column pixel spacing.
        default_pixel_method : str
            Must be one of ('MAX', 'MIN', 'MEAN', 'GEOM_MEAN'). This determines
            the default behavior for row_spacing/col_spacing. The default value for
            row/column spacing will be the implied function applied to the range
            and azimuth ground resolution. Note that geometric mean is defined as
            :math:`\sqrt(x*x + y*y)`
        """
        self._reference_point = None
        self._reference_hae = None
        self._reference_pixels = None
        self._normal_vector = None
        self._row_vector = None
        self._col_vector = None
        ProjectionHelper.__init__(
            self, sicd, row_spacing=row_spacing, col_spacing=col_spacing, default_pixel_method=default_pixel_method)
        # NB: ordering matters here - the plane frame requires the reference point
        self.set_reference_point(reference_point=reference_point)
        self.set_reference_pixels(reference_pixels=reference_pixels)
        self.set_plane_frame(
            normal_vector=normal_vector, row_vector=row_vector, col_vector=col_vector)
    @property
    def reference_point(self):
        # type: () -> numpy.ndarray
        """
        numpy.ndarray: The grid reference point.
        """
        return self._reference_point
    @property
    def reference_pixels(self):
        # type: () -> numpy.ndarray
        """
        numpy.ndarray: The ortho-rectified pixel coordinates of the grid reference point.
        """
        return self._reference_pixels
    @property
    def normal_vector(self):
        # type: () -> numpy.ndarray
        """
        numpy.ndarray: The normal vector.
        """
        return self._normal_vector
    def set_reference_point(self, reference_point=None):
        """
        Sets the reference point, which must be provided in ECF coordinates.
        Parameters
        ----------
        reference_point : None|numpy.ndarray
            The reference point (origin) of the planar grid. If None, then the
            `sicd.GeoData.SCP.ECF` will be used.
        Returns
        -------
        None
        """
        if reference_point is None:
            # fall back to the SICD-defined output plane reference, or the SCP
            if self.sicd.RadarCollection.Area is None:
                reference_point = self.sicd.GeoData.SCP.ECF.get_array()
            else:
                reference_point = self.sicd.RadarCollection.Area.Plane.RefPt.ECF.get_array()
        if not (isinstance(reference_point, numpy.ndarray) and reference_point.ndim == 1
                and reference_point.size == 3):
            raise ValueError('reference_point must be a vector of size 3.')
        self._reference_point = reference_point
        # set the reference hae
        ref_llh = ecf_to_geodetic(reference_point)
        self._reference_hae = ref_llh[2]
    def set_reference_pixels(self, reference_pixels=None):
        """
        Sets the ortho-rectified pixel coordinates of the reference point.
        Parameters
        ----------
        reference_pixels : None|numpy.ndarray
            The ortho-rectified pixel coordinates for the reference point (origin) of the planar grid.
            If None, then the (0, 0) will be used.
        Returns
        -------
        None
        """
        if reference_pixels is None:
            # fall back to the SICD-defined output plane reference pixel, or (0, 0)
            if self.sicd.RadarCollection.Area is not None:
                reference_pixels = numpy.array([
                    self.sicd.RadarCollection.Area.Plane.RefPt.Line,
                    self.sicd.RadarCollection.Area.Plane.RefPt.Sample],
                    dtype='float64')
            else:
                reference_pixels = numpy.zeros((2, ), dtype='float64')
        if not (isinstance(reference_pixels, numpy.ndarray) and reference_pixels.ndim == 1
                and reference_pixels.size == 2):
            raise ValueError('reference_pixels must be a vector of size 2.')
        self._reference_pixels = reference_pixels
    @property
    def row_vector(self):
        """
        numpy.ndarray: The grid increasing row direction (ECF) unit vector.
        """
        return self._row_vector
    @property
    def col_vector(self):
        """
        numpy.ndarray: The grid increasing column direction (ECF) unit vector.
        """
        return self._col_vector
    @property
    def reference_hae(self):
        """
        float: The height above the ellipsoid of the reference point.
        """
        return self._reference_hae
    def set_plane_frame(self, normal_vector=None, row_vector=None, col_vector=None):
        """
        Set the plane unit normal, and the row and column vectors, in ECF coordinates.
        Note that the perpendicular component of col_vector with respect to the
        row_vector will be used.
        If `normal_vector`, `row_vector`, and `col_vector` are all `None`, then
        the normal to the Earth tangent plane at the reference point is used for
        `normal_vector`. The `row_vector` will be defined as the perpendicular
        component of `sicd.Grid.Row.UVectECF` to `normal_vector`. The `colummn_vector`
        will be defined as the component of `sicd.Grid.Col.UVectECF` perpendicular
        to both `normal_vector` and `row_vector`.
        If only `normal_vector` is supplied, then the `row_vector` and `column_vector`
        will be defined similarly as the perpendicular components of
        `sicd.Grid.Row.UVectECF` and `sicd.Grid.Col.UVectECF`.
        Otherwise, all vectors supplied will be normalized, but are required to be
        mutually perpendicular. If only two vectors are supplied, then the third
        will be determined.
        Parameters
        ----------
        normal_vector : None|numpy.ndarray
            The vector defining the outward unit normal in ECF coordinates.
        row_vector : None|numpy.ndarray
            The vector defining increasing row direction.
        col_vector : None|numpy.ndarray
            The vector defining increasing column direction.
        Returns
        -------
        None
        """
        def normalize(vec, name, perp=None):
            # return the unit vector of `vec`, after removing the components
            # along any vector(s) supplied in `perp`
            if not isinstance(vec, numpy.ndarray):
                vec = numpy.array(vec, dtype=numpy.float64)
            if not (isinstance(vec, numpy.ndarray) and vec.ndim == 1 and vec.size == 3):
                raise ValueError('{} vector must be a numpy.ndarray of dimension 1 and size 3.'.format(name))
            vec = numpy.copy(vec)
            if perp is None:
                pass
            elif isinstance(perp, numpy.ndarray):
                vec = vec - perp*(perp.dot(vec))
            else:
                for entry in perp:
                    vec = vec - entry*(entry.dot(vec))
            norm = numpy.linalg.norm(vec)
            if norm == 0:
                raise ValueError('{} vector cannot be the zero vector.'.format(name))
            elif norm != 1:
                vec = vec/norm  # avoid modifying row_vector def exterior to this class
            return vec
        def check_perp(vec1, vec2, name1, name2, tolerance=1e-6):
            if abs(vec1.dot(vec2)) > tolerance:
                raise ValueError('{} vector and {} vector are not perpendicular'.format(name1, name2))
        if self._reference_point is None:
            raise ValueError('This requires that reference point is previously set.')
        if normal_vector is None and row_vector is None and col_vector is None:
            # fully default case - derive the frame from the SICD structure
            if self.sicd.RadarCollection.Area is None:
                self._normal_vector = wgs_84_norm(self.reference_point)
                self._row_vector = normalize(
                    self.sicd.Grid.Row.UVectECF.get_array(), 'row', perp=self.normal_vector)
                self._col_vector = normalize(
                    self.sicd.Grid.Col.UVectECF.get_array(), 'column', perp=(self.normal_vector, self.row_vector))
            else:
                self._row_vector = self.sicd.RadarCollection.Area.Plane.XDir.UVectECF.get_array()
                self._col_vector = normalize(
                    self.sicd.RadarCollection.Area.Plane.YDir.UVectECF.get_array(), 'col', perp=self._row_vector)
                self._normal_vector = numpy.cross(self._row_vector, self._col_vector)
        elif normal_vector is not None and row_vector is None and col_vector is None:
            # only the normal supplied - derive row/column from the SICD grid
            self._normal_vector = normalize(normal_vector, 'normal')
            self._row_vector = normalize(
                self.sicd.Grid.Row.UVectECF.get_array(), 'row', perp=self.normal_vector)
            self._col_vector = normalize(
                self.sicd.Grid.Col.UVectECF.get_array(), 'column', perp=(self.normal_vector, self.row_vector))
        elif normal_vector is None:
            # normal not supplied - requires both row and column, normal from cross product
            if row_vector is None or col_vector is None:
                raise ValueError('normal_vector is not defined, so both row_vector and col_vector must be.')
            row_vector = normalize(row_vector, 'row')
            col_vector = normalize(col_vector, 'col')
            check_perp(row_vector, col_vector, 'row', 'col')
            self._row_vector = row_vector
            self._col_vector = col_vector
            self._normal_vector = numpy.cross(row_vector, col_vector)
        elif col_vector is None:
            # normal and row supplied - column from cross product
            if row_vector is None:
                raise ValueError('col_vector is not defined, so both normal_vector and row_vector must be.')
            normal_vector = normalize(normal_vector, 'normal')
            row_vector = normalize(row_vector, 'row')
            check_perp(normal_vector, row_vector, 'normal', 'row')
            self._normal_vector = normal_vector
            self._row_vector = row_vector
            self._col_vector = numpy.cross(self.normal_vector, self.row_vector)
        elif row_vector is None:
            # normal and column supplied - row from cross product
            normal_vector = normalize(normal_vector, 'normal')
            col_vector = normalize(col_vector, 'col')
            check_perp(normal_vector, col_vector, 'normal', 'col')
            self._normal_vector = normal_vector
            self._col_vector = col_vector
            self._row_vector = numpy.cross(self.col_vector, self.normal_vector)
        else:
            # all three supplied - verify mutual perpendicularity
            normal_vector = normalize(normal_vector, 'normal')
            row_vector = normalize(row_vector, 'row')
            col_vector = normalize(col_vector, 'col')
            check_perp(normal_vector, row_vector, 'normal', 'row')
            check_perp(normal_vector, col_vector, 'normal', 'col')
            check_perp(row_vector, col_vector, 'row', 'col')
            self._normal_vector = normal_vector
            self._row_vector = row_vector
            self._col_vector = col_vector
        # check for outward unit norm - a negative dot product with the ECF
        # reference point means the normal points toward the earth center
        if numpy.dot(self.normal_vector, self.reference_point) < 0:
            logger.warning(
                'The normal vector appears to be inward pointing, so reversing.')
            self._normal_vector *= -1
    def plane_ecf_to_ortho(self, coords):
        """
        Converts ECF coordinates **known to be in the ground plane** to ortho grid coordinates.
        Parameters
        ----------
        coords : numpy.ndarray
        Returns
        -------
        numpy.ndarray
        """
        coords, o_shape = self._reshape(coords, 3)
        # project the offset from the reference point onto the plane frame
        diff = coords - self.reference_point
        if len(o_shape) == 1:
            out = numpy.zeros((2, ), dtype=numpy.float64)
            out[0] = self._reference_pixels[0] + numpy.sum(diff*self.row_vector)/self.row_spacing
            out[1] = self._reference_pixels[1] + numpy.sum(diff*self.col_vector)/self.col_spacing
        else:
            out = numpy.zeros((coords.shape[0], 2), dtype=numpy.float64)
            out[:, 0] = self._reference_pixels[0] + numpy.sum(diff*self.row_vector, axis=1)/self.row_spacing
            out[:, 1] = self._reference_pixels[1] + numpy.sum(diff*self.col_vector, axis=1)/self.col_spacing
            out = numpy.reshape(out, o_shape[:-1] + (2, ))
        return out
    def ecf_to_ortho(self, coords):
        return self.pixel_to_ortho(self.ecf_to_pixel(coords))
    def ecf_to_pixel(self, coords):
        pixel, _, _ = self.sicd.project_ground_to_image(coords, tolerance=1e-6, max_iterations=40)
        return pixel
    def ll_to_ortho(self, ll_coords):
        """
        Gets the `(ortho_row, ortho_column)` coordinates in the ortho-rectified
        system for the provided physical coordinates in `(Lat, Lon)` coordinates.
        In this case, the missing altitude will be set to `reference_hae`, which
        is imperfect.
        Parameters
        ----------
        ll_coords : numpy.ndarray|list|tuple
        Returns
        -------
        numpy.ndarray
        """
        ll_coords, o_shape = self._reshape(ll_coords, 2)
        llh_temp = numpy.zeros((ll_coords.shape[0], 3), dtype=numpy.float64)
        llh_temp[:, :2] = ll_coords
        llh_temp[:, 2] = self.reference_hae
        llh_temp = numpy.reshape(llh_temp, o_shape[:-1] + (3, ))
        return self.llh_to_ortho(llh_temp)
    def llh_to_ortho(self, llh_coords):
        llh_coords, o_shape = self._reshape(llh_coords, 3)
        ground = geodetic_to_ecf(llh_coords)
        return self.ecf_to_ortho(numpy.reshape(ground, o_shape))
    def ortho_to_ecf(self, ortho_coords):
        ortho_coords, o_shape = self._reshape(ortho_coords, 2)
        # physical offsets from the reference point along the plane frame
        xs = (ortho_coords[:, 0] - self._reference_pixels[0])*self.row_spacing
        ys = (ortho_coords[:, 1] - self._reference_pixels[1])*self.col_spacing
        if xs.ndim == 0:
            coords = self.reference_point + xs*self.row_vector + ys*self.col_vector
        else:
            coords = self.reference_point + numpy.outer(xs, self.row_vector) + \
                     numpy.outer(ys, self.col_vector)
        return numpy.reshape(coords, o_shape[:-1] + (3, ))
    def ortho_to_pixel(self, ortho_coords):
        ortho_coords, o_shape = self._reshape(ortho_coords, 2)
        pixel, _, _ = self.sicd.project_ground_to_image(self.ortho_to_ecf(ortho_coords), tolerance=1e-3, max_iterations=25)
        return numpy.reshape(pixel, o_shape)
    def pixel_to_ortho(self, pixel_coords):
        return self.plane_ecf_to_ortho(self.pixel_to_ecf(pixel_coords))
    def pixel_to_ecf(self, pixel_coords):
        return self.sicd.project_image_to_ground(
            pixel_coords, projection_type='PLANE',
            gref=self.reference_point, ugpn=self.normal_vector)
class PGRatPolyProjection(PGProjection):
    """
    Planar Grid ortho-rectification helper which fits rational polynomial
    approximations to the `PGProjection` transforms, replacing the iterative
    SICD projection calls with fast polynomial evaluation.
    """
    __slots__ = (
        '_reference_point', '_reference_pixels', '_row_vector', '_col_vector',
        '_normal_vector', '_reference_hae',
        '_row_samples', '_col_samples', '_alt_samples', '_alt_span',
        '_ecf_to_pixel_func', '_pixel_to_ortho_func', '_ortho_to_pixel_func')
    def __init__(self, sicd, reference_point=None, reference_pixels=None, normal_vector=None, row_vector=None,
                 col_vector=None, row_spacing=None, col_spacing=None,
                 default_pixel_method='GEOM_MEAN',
                 row_samples=51, col_samples=51, alt_samples=11, alt_span=250):
        r"""
        Parameters
        ----------
        sicd : SICDType
            The sicd object
        reference_point : None|numpy.ndarray
            The reference point (origin) of the planar grid. If None, a default
            derived from the SICD will be used.
        reference_pixels : None|numpy.ndarray
            The projected pixel
        normal_vector : None|numpy.ndarray
            The unit normal vector of the plane.
        row_vector : None|numpy.ndarray
            The vector defining increasing row direction. If None, a default
            derived from the SICD will be used.
        col_vector : None|numpy.ndarray
            The vector defining increasing column direction. If None, a default
            derived from the SICD will be used.
        row_spacing : None|float
            The row pixel spacing.
        col_spacing : None|float
            The column pixel spacing.
        default_pixel_method : str
            Must be one of ('MAX', 'MIN', 'MEAN', 'GEOM_MEAN'). This determines
            the default behavior for row_spacing/col_spacing. The default value for
            row/column spacing will be the implied function applied to the range
            and azimuth ground resolution. Note that geometric mean is defined as
            :math:`\sqrt(x*x + y*y)`
        row_samples : int
            How many row samples to use in fitting
        col_samples : int
            How many column samples to use in fitting
        alt_samples : int
            How many altitude samples to use in fitting
        alt_span : int|float
            Fitting for reference point hae +/- alt_span information.
        """
        self._ecf_to_pixel_func = None
        self._pixel_to_ortho_func = None
        self._alt_span = None
        self._ortho_to_pixel_func = None
        self._row_samples = int(row_samples)
        self._col_samples = int(col_samples)
        self._alt_samples = int(alt_samples)
        self._alt_span = float(alt_span)
        # the parent init establishes the reference point/plane frame needed for fitting
        PGProjection.__init__(
            self, sicd, reference_point=reference_point, reference_pixels=reference_pixels,
            normal_vector=normal_vector, row_vector=row_vector, col_vector=col_vector,
            row_spacing=row_spacing, col_spacing=col_spacing, default_pixel_method=default_pixel_method)
        self.perform_rational_poly_fitting()
    def _perform_ecf_func_fitting(self):
        # Fit the (ECF -> pixel) rational polynomial by sampling a
        # row x column x altitude grid and projecting each sample to ground.
        num_rows = self.sicd.ImageData.NumRows
        num_cols = self.sicd.ImageData.NumCols
        row_array = numpy.linspace(0, num_rows-1, self._row_samples)
        col_array = numpy.linspace(0, num_cols-1, self._col_samples)
        hae_array = self.reference_hae + numpy.linspace(
            -self._alt_span, self._alt_span, self._alt_samples)
        row_col_grid = numpy.empty((row_array.size, col_array.size, 2), dtype='float64')
        row_col_grid[:, :, 1], row_col_grid[:, :, 0] = numpy.meshgrid(col_array, row_array)
        ECF_data = numpy.empty((row_array.size, col_array.size, hae_array.size, 3))
        for i, hae0 in enumerate(hae_array):
            ECF_data[:, :, i, :] = self.sicd.project_image_to_ground(row_col_grid, projection_type='HAE', hae0=hae0)
        if not numpy.all(numpy.isfinite(ECF_data)):
            # signal to the caller that the rational polynomial approach is not viable
            raise SarpyRatPolyError(
                'NaN values are encountered when projecting across the image area,\n\t'
                'this SICD is not a good candidate for projection using rational polynomials')
        row_func = get_rational_poly_3d(
            ECF_data[:, :, :, 0].flatten(), ECF_data[:, :, :, 1].flatten(), ECF_data[:, :, :, 2].flatten(),
            numpy.stack([row_col_grid[:, :, 0] for _ in range(self._alt_samples)], axis=2).flatten(), order=3)
        col_func = get_rational_poly_3d(
            ECF_data[:, :, :, 0].flatten(), ECF_data[:, :, :, 1].flatten(), ECF_data[:, :, :, 2].flatten(),
            numpy.stack([row_col_grid[:, :, 1] for _ in range(self._alt_samples)], axis=2).flatten(), order=3)
        self._ecf_to_pixel_func = CombinedRationalPolynomial(row_func, col_func)
    def _perform_pixel_fitting(self):
        # Fit the (pixel <-> ortho) rational polynomials against the exact
        # PGProjection transform, sampled on a denser 2-D grid.
        num_rows = self.sicd.ImageData.NumRows
        num_cols = self.sicd.ImageData.NumCols
        row_array = numpy.linspace(0, num_rows-1, 2*self._row_samples)
        col_array = numpy.linspace(0, num_cols-1, 2*self._col_samples)
        pixel_data = numpy.empty((row_array.size, col_array.size, 2), dtype='float64')
        pixel_data[:, :, 1], pixel_data[:, :, 0] = numpy.meshgrid(col_array, row_array)
        ortho_data = PGProjection.pixel_to_ortho(self, pixel_data)
        # NOTE(review): the third argument is not flattened here, unlike the first
        # two - presumably get_rational_poly_2d flattens internally; confirm
        pix_to_orth_row = get_rational_poly_2d(
            pixel_data[:, :, 0].flatten(), pixel_data[:, :, 1].flatten(),
            ortho_data[:, :, 0], order=3)
        pix_to_orth_col = get_rational_poly_2d(
            pixel_data[:, :, 0].flatten(), pixel_data[:, :, 1].flatten(),
            ortho_data[:, :, 1], order=3)
        self._pixel_to_ortho_func = CombinedRationalPolynomial(pix_to_orth_row, pix_to_orth_col)
        orth_to_pix_row = get_rational_poly_2d(
            ortho_data[:, :, 0].flatten(), ortho_data[:, :, 1].flatten(),
            pixel_data[:, :, 0], order=3)
        orth_to_pix_col = get_rational_poly_2d(
            ortho_data[:, :, 0].flatten(), ortho_data[:, :, 1].flatten(),
            pixel_data[:, :, 1], order=3)
        self._ortho_to_pixel_func = CombinedRationalPolynomial(orth_to_pix_row, orth_to_pix_col)
    def perform_rational_poly_fitting(self):
        """
        Defined the rational polynomial functions via fitting.
        """
        self._perform_ecf_func_fitting()
        self._perform_pixel_fitting()
    def ecf_to_ortho(self, coords):
        # composition of the two fitted transforms
        return self.pixel_to_ortho(self.ecf_to_pixel(coords))
    def ecf_to_pixel(self, coords):
        return self._ecf_to_pixel_func(coords, combine=True)
    def ortho_to_pixel(self, ortho_coords):
        return self._ortho_to_pixel_func(ortho_coords, combine=True)
    def pixel_to_ortho(self, pixel_coords):
        return self._pixel_to_ortho_func(pixel_coords, combine=True)
| 33,381 | 36.80521 | 123 | py |
sarpy | sarpy-master/sarpy/processing/ortho_rectify/ortho_methods.py | """
Methods for ortho-rectification
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import Union
import numpy
from scipy.interpolate import RectBivariateSpline
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.blocks import Poly2DType
from sarpy.geometry.geometry_elements import GeometryObject
from .projection_helper import ProjectionHelper, PGProjection, PGRatPolyProjection
from ..rational_polynomial import SarpyRatPolyError
logger = logging.getLogger(__name__)
def _linear_fill(pixel_array, fill_interval=1):
"""
This is to final in linear features in pixel space at the given interval.
Parameters
----------
pixel_array : numpy.ndarray
fill_interval : int|float
Returns
-------
numpy.ndarray
"""
if not isinstance(pixel_array, numpy.ndarray):
raise TypeError('pixel_array must be a numpy array. Got type {}'.format(type(pixel_array)))
if pixel_array.ndim < 2:
# nothing to be done
return pixel_array
if pixel_array.ndim > 2:
raise ValueError('pixel_array must be no more than two-dimensional. Got shape {}'.format(pixel_array.shape))
if pixel_array.shape[1] != 2:
raise ValueError(
'pixel_array is two dimensional, and the final dimension must have length 2. '
'Got shape {}'.format(pixel_array.shape))
if pixel_array.shape[0] < 2:
# nothing to be done
return pixel_array
def make_segment(start_point, end_point):
segment_length = numpy.linalg.norm(end_point - start_point)
segment_count = max(2, int(numpy.ceil(segment_length/float(fill_interval) + 1)))
segment = numpy.zeros((segment_count, 2), dtype=numpy.float64)
# NB: it's ok if start == end in linspace
segment[:, 0] = numpy.linspace(start_point[0], end_point[0], segment_count)
segment[:, 1] = numpy.linspace(start_point[1], end_point[1], segment_count)
return segment
segments = []
for i in range(pixel_array.shape[0]-1):
start_segment = pixel_array[i, :]
end_segment = pixel_array[i+1, :]
segments.append(make_segment(start_segment, end_segment))
return numpy.vstack(segments)
class OrthorectificationHelper(object):
"""
Abstract helper class which defines ortho-rectification process for a sicd-type
reader object.
"""
__slots__ = (
'_reader', '_index', '_sicd', '_proj_helper', '_out_dtype', '_complex_valued',
'_pad_value', '_apply_radiometric', '_subtract_radiometric_noise',
'_rad_poly', '_noise_poly', '_default_physical_bounds')
    def __init__(self, reader, index=0, proj_helper=None, complex_valued=False,
                 pad_value=None, apply_radiometric=None, subtract_radiometric_noise=False):
        """
        Parameters
        ----------
        reader : SICDTypeReader
        index : int
        proj_helper : None|ProjectionHelper
            If `None`, this will default to `PGRatPolyProjection(<sicd>)` unless there is
            a SarpyRatPolyError, when it will fall back to `PGProjection(<sicd>)`,
            where `<sicd>` will be the sicd from `reader` at `index`. Otherwise,
            it is the user's responsibility to ensure that `reader`, `index` and
            `proj_helper` are in sync.
        complex_valued : bool
            Do we want complex values returned? If `False`, the magnitude values
            will be used.
        pad_value : None|Any
            Value to use for any out-of-range pixels. Defaults to `0` if not provided.
        apply_radiometric : None|str
            If provided, must be one of `['RCS', 'Sigma0', 'Gamma0', 'Beta0']`
            (not case-sensitive). This will apply the given radiometric scale factor
            to calculated pixel power, with noise subtracted if `subtract_radiometric_noise = True`.
            **Only valid if `complex_valued=False`**.
        subtract_radiometric_noise : bool
            This indicates whether the radiometric noise should be subtracted from
            the pixel amplitude. **Only valid if `complex_valued=False`**.
        """
        self._index = None
        self._sicd = None
        self._proj_helper = None
        self._apply_radiometric = None
        self._subtract_radiometric_noise = None
        self._rad_poly = None  # type: [None, Poly2DType]
        self._noise_poly = None  # type: [None, Poly2DType]
        self._default_physical_bounds = None
        self._pad_value = pad_value
        self._complex_valued = complex_valued
        # complex output uses complex64; magnitude (and radiometric) output uses float32
        if self._complex_valued:
            self._out_dtype = numpy.dtype('complex64')
        else:
            self._out_dtype = numpy.dtype('float32')
        if not isinstance(reader, SICDTypeReader):
            raise TypeError('Got unexpected type {} for reader'.format(type(reader)))
        self._reader = reader
        # NB: ordering matters - the radiometric property setters validate against
        # the sicd (when present), and set_index_and_proj_helper re-validates once
        # the sicd for `index` has been selected
        self.apply_radiometric = apply_radiometric
        self.subtract_radiometric_noise = subtract_radiometric_noise
        self.set_index_and_proj_helper(index, proj_helper=proj_helper)
@property
def reader(self):
# type: () -> SICDTypeReader
"""
SICDTypeReader: The reader instance.
"""
return self._reader
@property
def index(self):
# type: () -> int
"""
int: The index for the desired sicd element.
"""
return self._index
@property
def sicd(self):
# type: () -> SICDType
"""
SICDType: The sicd structure.
"""
return self._sicd
@property
def proj_helper(self):
# type: () -> ProjectionHelper
"""
ProjectionHelper: The projection helper instance.
"""
return self._proj_helper
@property
def out_dtype(self):
# type: () -> numpy.dtype
"""
numpy.dtype: The output data type.
"""
return self._out_dtype
@property
def pad_value(self):
"""
The value to use for any portions of the array which extend beyond the range
of where the reader has data.
"""
return self._pad_value
@pad_value.setter
def pad_value(self, value):
self._pad_value = value
    def set_index_and_proj_helper(self, index, proj_helper=None):
        """
        Sets the index and proj_helper objects, and re-validates the radiometric
        settings against the newly selected sicd.
        Parameters
        ----------
        index : int
            The index of the desired sicd in the reader.
        proj_helper : ProjectionHelper
            If None, a default helper is constructed from the sicd at `index`.
        Returns
        -------
        None
        """
        self._index = index
        self._sicd = self.reader.get_sicds_as_tuple()[index]
        # re-check the radiometric configuration now that the sicd is known
        self._is_radiometric_valid()
        self._is_radiometric_noise_valid()
        default_ortho_bounds = None
        if proj_helper is None:
            # prefer the fast rational-polynomial projection, falling back to the
            # direct planar-grid projection when the fitting fails
            try:
                proj_helper = PGRatPolyProjection(self.sicd)
            except SarpyRatPolyError:
                proj_helper = PGProjection(self.sicd)
            # when the SICD defines an output plane, use its extent for the
            # default ortho-rectified bounds
            if self.sicd.RadarCollection is not None and self.sicd.RadarCollection.Area is not None \
                    and self.sicd.RadarCollection.Area.Plane is not None:
                plane = self.sicd.RadarCollection.Area.Plane
                default_ortho_bounds = numpy.array([
                    plane.XDir.FirstLine, plane.XDir.FirstLine+plane.XDir.NumLines,
                    plane.YDir.FirstSample, plane.YDir.FirstSample+plane.YDir.NumSamples], dtype=numpy.uint32)
        if not isinstance(proj_helper, ProjectionHelper):
            raise TypeError('Got unexpected type {} for proj_helper'.format(proj_helper))
        self._proj_helper = proj_helper
        if default_ortho_bounds is not None:
            # bounds_to_rectangle is defined elsewhere on this class
            _, ortho_rectangle = self.bounds_to_rectangle(default_ortho_bounds)
            self._default_physical_bounds = self.proj_helper.ortho_to_ecf(ortho_rectangle)
@property
def apply_radiometric(self):
# type: () -> Union[None, str]
"""
None|str: This indicates which, if any, of the radiometric scale factors
to apply in the result. If not `None`, this must be one of ('RCS', 'SIGMA0', 'GAMMA0', 'BETA0').
Setting to a value other than `None` will result in an error if 1.) `complex_valued` is `True`, or
2.) the appropriate corresponding element `sicd.Radiometric.RCSSFPoly`,
`sicd.Radiometric.SigmaZeroSFPoly`, `sicd.Radiometric.GammaZeroSFPoly`, or
`sicd.Radiometric.BetaZeroSFPoly` is not populated with a valid polynomial.
"""
return self._apply_radiometric
@apply_radiometric.setter
def apply_radiometric(self, value):
if value is None:
self._apply_radiometric = None
elif isinstance(value, str):
val = value.upper()
allowed = ('RCS', 'SIGMA0', 'GAMMA0', 'BETA0')
if val not in allowed:
raise ValueError('Require that value is one of {}, got {}'.format(allowed, val))
self._apply_radiometric = val
self._is_radiometric_valid()
else:
raise TypeError('Got unexpected type {} for apply_radiometric'.format(type(value)))
    @property
    def subtract_radiometric_noise(self):
        """
        bool: This indicates whether the radiometric noise should be subtracted from
        the pixel amplitude. If `apply_radiometric` is not `None`, then this subtraction
        will happen applying the corresponding scaling.

        Setting this to `True` will **result in an error** unless the given sicd structure has
        `sicd.Radiometric.NoiseLevel.NoisePoly` populated with a viable polynomial and
        `sicd.Radiometric.NoiseLevel.NoiseLevelType == 'ABSOLUTE'`.
        """

        return self._subtract_radiometric_noise

    @subtract_radiometric_noise.setter
    def subtract_radiometric_noise(self, value):
        # coerce any truthy/falsy input to a strict boolean
        if value:
            self._subtract_radiometric_noise = True
        else:
            self._subtract_radiometric_noise = False
        # re-verify that the sicd structure supports this setting
        self._is_radiometric_noise_valid()
def _is_radiometric_valid(self):
"""
Checks whether the apply radiometric settings are valid.
Returns
-------
None
"""
if self.apply_radiometric is None:
self._rad_poly = None
return # nothing to be done
if self._complex_valued:
raise ValueError('apply_radiometric is not None, which requires real valued output.')
if self.sicd is None:
return # nothing to be done, no sicd set (yet)
if self.sicd.Radiometric is None:
raise ValueError(
'apply_radiometric is {}, but sicd.Radiometric is unpopulated.'.format(self.apply_radiometric))
if self.apply_radiometric == 'RCS':
if self.sicd.Radiometric.RCSSFPoly is None:
raise ValueError('apply_radiometric is "RCS", but the sicd.Radiometric.RCSSFPoly is not populated.')
else:
self._rad_poly = self.sicd.Radiometric.RCSSFPoly
elif self.apply_radiometric == 'SIGMA0':
if self.sicd.Radiometric.SigmaZeroSFPoly is None:
raise ValueError(
'apply_radiometric is "SIGMA0", but the sicd.Radiometric.SigmaZeroSFPoly is not populated.')
else:
self._rad_poly = self.sicd.Radiometric.SigmaZeroSFPoly
elif self.apply_radiometric == 'GAMMA0':
if self.sicd.Radiometric.GammaZeroSFPoly is None:
raise ValueError(
'apply_radiometric is "GAMMA0", but the sicd.Radiometric.GammaZeroSFPoly is not populated.')
else:
self._rad_poly = self.sicd.Radiometric.GammaZeroSFPoly
elif self.apply_radiometric == 'BETA0':
if self.sicd.Radiometric.BetaZeroSFPoly is None:
raise ValueError(
'apply_radiometric is "BETA0", but the sicd.Radiometric.BetaZeroSFPoly is not populated.')
else:
self._rad_poly = self.sicd.Radiometric.BetaZeroSFPoly
else:
raise ValueError('Got unhandled value {} for apply_radiometric'.format(self.apply_radiometric))
def _is_radiometric_noise_valid(self):
"""
Checks whether the subtract_radiometric_noise setting is valid.
Returns
-------
None
"""
if not self.subtract_radiometric_noise:
self._noise_poly = None
return # nothing to be done
if self._complex_valued:
raise ValueError('subtract_radiometric_noise is True, which requires real valued output.')
if self.sicd is None:
return # nothing to be done, no sicd set (yet)
# set the noise polynomial value
if self.sicd.Radiometric is None:
raise ValueError(
'subtract_radiometric_noise is True,\n\t'
'but sicd.Radiometric is unpopulated.')
if self.sicd.Radiometric.NoiseLevel is None:
raise ValueError(
'subtract_radiometric_noise is set to True,\n\t'
'but sicd.Radiometric.NoiseLevel is not populated.')
if self.sicd.Radiometric.NoiseLevel.NoisePoly is None:
raise ValueError(
'subtract_radiometric_noise is set to True,\n\t'
'but sicd.Radiometric.NoiseLevel.NoisePoly is not populated.')
if self.sicd.Radiometric.NoiseLevel.NoiseLevelType == 'RELATIVE':
raise ValueError(
'subtract_radiometric_noise is set to True,\n\t'
'but sicd.Radiometric.NoiseLevel.NoiseLevelType is "RELATIVE"')
self._noise_poly = self.sicd.Radiometric.NoiseLevel.NoisePoly
def get_full_ortho_bounds(self):
"""
Gets the bounds for the ortho-rectified coordinates for the full sicd image.
Returns
-------
numpy.ndarray
Of the form `[min row, max row, min column, max column]`.
"""
if self._default_physical_bounds is not None:
ortho_rectangle = self.proj_helper.ecf_to_ortho(self._default_physical_bounds)
return self.proj_helper.get_pixel_array_bounds(ortho_rectangle)
full_coords = self.sicd.ImageData.get_full_vertex_data()
full_line = _linear_fill(full_coords, fill_interval=1)
return self.get_orthorectification_bounds_from_pixel_object(full_line)
def get_valid_ortho_bounds(self):
"""
Gets the bounds for the ortho-rectified coordinates for the valid portion
of the sicd image. This is the outer bounds of the valid portion, so may contain
some portion which is not itself valid.
If sicd.ImageData.ValidData is not defined, then the full image bounds will
be returned.
Returns
-------
numpy.ndarray
Of the form `[min row, max row, min column, max column]`.
"""
if self._default_physical_bounds is not None:
ortho_rectangle = self.proj_helper.ecf_to_ortho(self._default_physical_bounds)
return self.proj_helper.get_pixel_array_bounds(ortho_rectangle)
valid_coords = self.sicd.ImageData.get_valid_vertex_data()
if valid_coords is None:
valid_coords = self.sicd.ImageData.get_full_vertex_data()
valid_line = _linear_fill(valid_coords, fill_interval=1)
return self.get_orthorectification_bounds_from_pixel_object(valid_line)
def get_orthorectification_bounds_from_pixel_object(self, coordinates):
"""
Determine the ortho-rectified (coordinate-system aligned) rectangular bounding
region which contains the provided coordinates in pixel space.
Parameters
----------
coordinates : GeometryObject|numpy.ndarray|list|tuple
The coordinate system of the input will be assumed to be pixel space.
Returns
-------
numpy.ndarray
Of the form `(row_min, row_max, col_min, col_max)`.
"""
if isinstance(coordinates, GeometryObject):
pixel_bounds = coordinates.get_bbox()
siz = int(len(pixel_bounds)/2)
coordinates = numpy.array(
[[pixel_bounds[0], pixel_bounds[1]],
[pixel_bounds[siz], pixel_bounds[1]],
[pixel_bounds[siz], pixel_bounds[siz]],
[pixel_bounds[0], pixel_bounds[siz]]], dtype=numpy.float64)
filled_coordinates = _linear_fill(coordinates, fill_interval=1)
ortho = self.proj_helper.pixel_to_ortho(filled_coordinates)
return self.proj_helper.get_pixel_array_bounds(ortho)
def get_orthorectification_bounds_from_latlon_object(self, coordinates):
"""
Determine the ortho-rectified (coordinate-system aligned) rectangular bounding
region which contains the provided coordinates in lat/lon space.
Parameters
----------
coordinates : GeometryObject|numpy.ndarray|list|tuple
The coordinate system of the input will be assumed to be lat/lon space.
**Note** a GeometryObject is expected to follow lon/lat ordering paradigm,
by convention.
Returns
-------
numpy.ndarray
Of the form `(row_min, row_max, col_min, col_max)`.
"""
if isinstance(coordinates, GeometryObject):
# Note we assume a geometry object is using lon/lat ordering of coordinates.
bounds = coordinates.get_bbox()
if len(bounds) == 4:
coordinates = numpy.array(
[[bounds[1], bounds[0]],
[bounds[1], bounds[2]],
[bounds[3], bounds[2]],
[bounds[3], bounds[0]]], dtype=numpy.float64)
elif len(bounds) >= 6:
siz = int(len(bounds)/2)
coordinates = numpy.array(
[[bounds[1], bounds[0], bounds[3]],
[bounds[1], bounds[siz], bounds[3]],
[bounds[3], bounds[2], bounds[3]],
[bounds[3], bounds[0], bounds[3]]], dtype=numpy.float64)
else:
raise ValueError(
'It is expected that the geometry object "coordinates" uses two '
'or three dimensional coordinates. Got {} for a bounding box.'.format(bounds))
if not isinstance(coordinates, numpy.ndarray):
coordinates = numpy.array(coordinates, dtype=numpy.float64)
if coordinates.shape[-1] == 2:
ortho = self.proj_helper.ll_to_ortho(coordinates)
elif coordinates.shape[-1] == 3:
ortho = self.proj_helper.llh_to_ortho(coordinates)
else:
raise ValueError('Got unexpected shape for coordinates {}'.format(coordinates.shape))
return self.proj_helper.get_pixel_array_bounds(ortho)
@staticmethod
def validate_bounds(bounds):
"""
Validate a pixel type bounds array.
Parameters
----------
bounds : numpy.ndarray|list|tuple
Returns
-------
numpy.ndarray
"""
if not isinstance(bounds, numpy.ndarray):
bounds = numpy.asarray(bounds)
if bounds.ndim != 1 or bounds.size != 4:
raise ValueError(
'bounds is required to be one-dimensional and size 4. '
'Got input shape {}'.format(bounds.shape))
if bounds[0] >= bounds[1] or bounds[2] >= bounds[3]:
raise ValueError(
'bounds is required to be of the form (min row, max row, min col, max col), '
'got {}'.format(bounds))
if issubclass(bounds.dtype.type, numpy.integer):
return bounds
else:
out = numpy.zeros((4,), dtype=numpy.int32)
out[0:3:2] = (numpy.floor(bounds[0]), numpy.floor(bounds[2]))
out[1:4:2] = (numpy.ceil(bounds[1]), numpy.ceil(bounds[3]))
return out
@staticmethod
def _get_ortho_mesh(ortho_bounds):
"""
Fetch a the grid of rows/columns coordinates for the desired rectangle.
Parameters
----------
ortho_bounds : numpy.ndarray
Of the form `(min row, max row, min col, max col)`.
Returns
-------
numpy.ndarray
"""
ortho_shape = (int(ortho_bounds[1]-ortho_bounds[0]), int(ortho_bounds[3]-ortho_bounds[2]), 2)
ortho_mesh = numpy.zeros(ortho_shape, dtype=numpy.int32)
ortho_mesh[:, :, 1], ortho_mesh[:, :, 0] = numpy.meshgrid(numpy.arange(ortho_bounds[2], ortho_bounds[3]),
numpy.arange(ortho_bounds[0], ortho_bounds[1]))
return ortho_mesh
@staticmethod
def _get_mask(pixel_rows, pixel_cols, row_array, col_array):
"""
Construct the valid mask. This is a helper function, and no error checking
will be performed for any issues.
Parameters
----------
pixel_rows : numpy.ndarray
The array of pixel rows. Must be the same shape as `pixel_cols`.
pixel_cols : numpy.ndarray
The array of pixel columns. Must be the same shape as `pixel_rows'.
row_array : numpy.ndarray
The rows array used for bounds, must be one dimensional and monotonic.
col_array : numpy.ndarray
The columns array used for bounds, must be one dimensional and monotonic.
Returns
-------
numpy.ndarray
"""
mask = (numpy.isfinite(pixel_rows) &
numpy.isfinite(pixel_cols) &
(pixel_rows >= row_array[0]) & (pixel_rows < row_array[-1]) &
(pixel_cols >= col_array[0]) & (pixel_cols < col_array[-1]))
return mask
def bounds_to_rectangle(self, bounds):
"""
From a bounds style array, construct the four corner coordinate array.
This follows the SICD convention of going CLOCKWISE around the corners.
Parameters
----------
bounds : numpy.ndarray|list|tuple
Of the form `(row min, row max, col min, col max)`.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The (integer valued) bounds and rectangular coordinates.
"""
bounds = self.validate_bounds(bounds)
coords = numpy.zeros((4, 2), dtype=numpy.int32)
coords[0, :] = (bounds[0], bounds[2]) # row min, col min
coords[1, :] = (bounds[0], bounds[3]) # row min, col max
coords[2, :] = (bounds[1], bounds[3]) # row max, col max
coords[3, :] = (bounds[1], bounds[2]) # row max, col min
return bounds, coords
def extract_pixel_bounds(self, bounds):
"""
Validate the bounds array of orthorectified pixel coordinates, and determine
the required bounds in reader pixel coordinates. If the
Parameters
----------
bounds : numpy.ndarray|list|tuple
Returns
-------
(numpy.ndarray, numpy.ndarray)
The integer valued orthorectified and reader pixel coordinate bounds.
"""
bounds, coords = self.bounds_to_rectangle(bounds)
filled_coords = _linear_fill(coords, fill_interval=1)
pixel_coords = self.proj_helper.ortho_to_pixel(filled_coords)
pixel_bounds = self.proj_helper.get_pixel_array_bounds(pixel_coords)
return bounds, self.validate_bounds(pixel_bounds)
def _initialize_workspace(self, ortho_bounds, final_dimension=0):
"""
Initialize the orthorectification array workspace.
Parameters
----------
ortho_bounds : numpy.ndarray
Of the form `(min row, max row, min col, max col)`.
final_dimension : int
The size of the third dimension. If `0`, then it will be omitted.
Returns
-------
numpy.ndarray
"""
if final_dimension > 0:
out_shape = (
int(ortho_bounds[1] - ortho_bounds[0]),
int(ortho_bounds[3] - ortho_bounds[2]),
int(final_dimension))
else:
out_shape = (
int(ortho_bounds[1]-ortho_bounds[0]),
int(ortho_bounds[3]-ortho_bounds[2]))
return numpy.zeros(out_shape, dtype=self.out_dtype) if self._pad_value is None else \
numpy.full(out_shape, self._pad_value, dtype=self.out_dtype)
def get_real_pixel_bounds(self, pixel_bounds):
"""
Fetch the real pixel limit from the nominal pixel limits - this just
factors in the image reader extent.
Parameters
----------
pixel_bounds : numpy.ndarray
Returns
-------
numpy.ndarray
"""
if pixel_bounds[0] > pixel_bounds[1] or pixel_bounds[2] > pixel_bounds[3]:
raise ValueError('Got unexpected and invalid pixel_bounds array {}'.format(pixel_bounds))
pixel_limits = self.reader.get_data_size_as_tuple()[self.index]
if (pixel_bounds[0] >= pixel_limits[0]) or (pixel_bounds[1] < 0) or \
(pixel_bounds[2] >= pixel_limits[1]) or (pixel_bounds[3] < 0):
# this entirely misses the whole region
return numpy.array([0, 0, 0, 0], dtype=numpy.int32)
real_pix_bounds = numpy.array([
max(0, pixel_bounds[0]), min(pixel_limits[0], pixel_bounds[1]),
max(0, pixel_bounds[2]), min(pixel_limits[1], pixel_bounds[3])], dtype=numpy.int32)
return real_pix_bounds
def _apply_radiometric_params(self, pixel_rows, pixel_cols, value_array):
"""
Apply the radiometric parameters to the solution array.
Parameters
----------
pixel_rows : numpy.ndarray
pixel_cols : numpy.ndarray
value_array : numpy.ndarray
Returns
-------
numpy.ndarray
"""
if self._rad_poly is None and self._noise_poly is None:
# nothing to be done.
return value_array
if pixel_rows.shape == value_array.shape and pixel_cols.shape == value_array.shape:
rows_meters = (pixel_rows - self.sicd.ImageData.SCPPixel.Row)*self.sicd.Grid.Row.SS
cols_meters = (pixel_cols - self.sicd.ImageData.SCPPixel.Col)*self.sicd.Grid.Col.SS
elif value_array.ndim == 2 and \
(pixel_rows.ndim == 1 and pixel_rows.size == value_array.shape[0]) and \
(pixel_cols.ndim == 1 and pixel_cols.size == value_array.shape[1]):
cols_meters, rows_meters = numpy.meshgrid(
(pixel_cols - self.sicd.ImageData.SCPPixel.Col)*self.sicd.Grid.Col.SS,
(pixel_rows - self.sicd.ImageData.SCPPixel.Row) * self.sicd.Grid.Row.SS)
else:
raise ValueError(
'Either pixel_rows, pixel_cols, and value_array must all have the same shape, '
'or pixel_rows/pixel_cols are one dimensional and '
'value_array.shape = (pixel_rows.size, pixel_cols.size). Got shapes {}, {}, and '
'{}'.format(pixel_rows.shape, pixel_cols.shape, value_array.shape))
# calculate pixel power, with noise subtracted if necessary
if self._noise_poly is not None:
noise = numpy.exp(10 * self._noise_poly(rows_meters, cols_meters)) # convert from db to power
pixel_power = value_array*value_array - noise
del noise
else:
pixel_power = value_array*value_array
if self._rad_poly is None:
return numpy.sqrt(pixel_power)
else:
return pixel_power*self._rad_poly(rows_meters, cols_meters)
def _validate_row_col_values(self, row_array, col_array, value_array, value_is_flat=False):
"""
Helper method for validating the types and shapes.
Parameters
----------
row_array : numpy.ndarray
The rows of the pixel array. Must be one-dimensional, monotonically
increasing, and have `row_array.size = value_array.shape[0]`.
col_array : numpy.ndarray
The columns of the pixel array. Must be one-dimensional, monotonically
increasing, and have and have `col_array.size = value_array.shape[1]`.
value_array : numpy.ndarray
The values array, whihc must be two or three dimensional. If this has
complex dtype and `complex_valued=False`, then the :func:`numpy.abs`
will be applied.
value_is_flat : bool
If `True`, then `value_array` must be exactly two-dimensional.
Returns
-------
numpy.ndarray
"""
# verify numpy arrays
if not isinstance(value_array, numpy.ndarray):
raise TypeError('value_array must be numpy.ndarray, got type {}'.format(type(value_array)))
if not isinstance(row_array, numpy.ndarray):
raise TypeError('row_array must be numpy.ndarray, got type {}'.format(type(row_array)))
if not isinstance(col_array, numpy.ndarray):
raise TypeError('col_array must be numpy.ndarray, got type {}'.format(type(col_array)))
# verify array shapes make sense
if value_array.ndim not in (2, 3):
raise ValueError('value_array must be two or three dimensional')
if row_array.ndim != 1 or row_array.size != value_array.shape[0]:
raise ValueError(
'We must have row_array is one dimensional and row_array.size = value.array.shape[0]. '
'Got row_array.shape = {}, and value_array = {}'.format(row_array.shape, value_array.shape))
if col_array.ndim != 1 or col_array.size != value_array.shape[1]:
raise ValueError(
'We must have col_array is one dimensional and col_array.size = value.array.shape[1]. '
'Got col_array.shape = {}, and value_array = {}'.format(col_array.shape, value_array.shape))
if value_is_flat and len(value_array.shape) != 2:
raise ValueError('value_array must be two-dimensional. Got shape {}'.format(value_array.shape))
# verify row_array/col_array are monotonically increasing
if numpy.any(numpy.diff(row_array.astype('float64')) <= 0):
raise ValueError('row_array must be monotonically increasing.')
if numpy.any(numpy.diff(col_array.astype('float64')) <= 0):
raise ValueError('col_array must be monotonically increasing.')
# address the dtype of value_array
if (not self._complex_valued) and numpy.iscomplexobj(value_array):
return numpy.abs(value_array)
return value_array
def get_orthorectified_from_array(self, ortho_bounds, row_array, col_array, value_array):
"""
Construct the orthorectified array covering the orthorectified region given by
`ortho_bounds` based on the `values_array`, which spans the pixel region defined by
`row_array` and `col_array`.
This is mainly a helper method, and should only be called directly for specific and
directed reasons.
Parameters
----------
ortho_bounds : numpy.ndarray
Determines the orthorectified bounds region, of the form
`(min row, max row, min column, max column)`.
row_array : numpy.ndarray
The rows of the pixel array. Must be one-dimensional, monotonically increasing,
and have `row_array.size = value_array.shape[0]`.
col_array
The columns of the pixel array. Must be one-dimensional, monotonically increasing,
and have `col_array.size = value_array.shape[1]`.
value_array
The values array. If this has complex dtype and `complex_valued=False`, then
the :func:`numpy.abs` will be applied.
Returns
-------
numpy.ndarray
"""
# validate our inputs
value_array = self._validate_row_col_values(row_array, col_array, value_array, value_is_flat=False)
if value_array.size == 0:
if value_array.ndim == 3:
return self._initialize_workspace(ortho_bounds, final_dimension=value_array.shape[2])
else:
return self._initialize_workspace(ortho_bounds)
if value_array.ndim == 2:
return self._get_orthrectified_from_array_flat(ortho_bounds, row_array, col_array, value_array)
else: # it must be three dimensional, as checked by _validate_row_col_values()
ortho_array = self._initialize_workspace(ortho_bounds, final_dimension=value_array.shape[2])
for i in range(value_array.shape[2]):
ortho_array[:, :, i] = self._get_orthrectified_from_array_flat(
ortho_bounds, row_array, col_array, value_array[:, :, i])
return ortho_array
def get_orthorectified_for_ortho_bounds(self, bounds):
"""
Determine the array corresponding to the array of bounds given in
ortho-rectified pixel coordinates.
Parameters
----------
bounds : numpy.ndarray|list|tuple
Of the form `(row_min, row_max, col_min, col_max)`. Note that non-integer
values will be expanded outwards (floor of minimum and ceil at maximum).
Following Python convention, this will be inclusive at the minimum and
exclusive at the maximum.
Returns
-------
numpy.ndarray
"""
ortho_bounds, nominal_pixel_bounds = self.extract_pixel_bounds(bounds)
# extract the values - ensure that things are within proper image bounds
pixel_bounds = self.get_real_pixel_bounds(nominal_pixel_bounds)
pixel_array = self.reader[
pixel_bounds[0]:pixel_bounds[1], pixel_bounds[2]:pixel_bounds[3], self.index]
row_arr = numpy.arange(pixel_bounds[0], pixel_bounds[1])
col_arr = numpy.arange(pixel_bounds[2], pixel_bounds[3])
return self.get_orthorectified_from_array(ortho_bounds, row_arr, col_arr, pixel_array)
def get_orthorectified_for_pixel_bounds(self, pixel_bounds):
"""
Determine the array corresponding to the given array bounds given in reader
pixel coordinates.
Parameters
----------
pixel_bounds : numpy.ndarray|list|tuple
Of the form `(row_min, row_max, col_min, col_max)`.
Returns
-------
numpy.ndarray
"""
pixel_bounds, pixel_rect = self.bounds_to_rectangle(pixel_bounds)
return self.get_orthorectified_for_pixel_object(pixel_rect)
def get_orthorectified_for_pixel_object(self, coordinates):
"""
Determine the ortho-rectified rectangular array values, which will bound
the given object - with coordinates expressed in pixel space.
Parameters
----------
coordinates : GeometryObject|numpy.ndarray|list|tuple
The coordinate system of the input will be assumed to be pixel space.
Returns
-------
numpy.ndarray
"""
bounds = self.get_orthorectification_bounds_from_pixel_object(coordinates)
return self.get_orthorectified_for_ortho_bounds(bounds)
def get_orthorectified_for_latlon_object(self, ll_coordinates):
"""
Determine the ortho-rectified rectangular array values, which will bound
the given object - with coordinates expressed in lat/lon space.
Parameters
----------
ll_coordinates : GeometryObject|numpy.ndarray|list|tuple
The coordinate system of the input will be assumed to be pixel space.
**Note** a GeometryObject is expected to follow lon/lat ordering paradigm,
by convention.
Returns
-------
numpy.ndarray
"""
bounds = self.get_orthorectification_bounds_from_latlon_object(ll_coordinates)
return self.get_orthorectified_for_ortho_bounds(bounds)
def _setup_flat_workspace(self, ortho_bounds, row_array, col_array, value_array):
"""
Helper method for setting up the flat workspace.
Parameters
----------
ortho_bounds : numpy.ndarray
Determines the orthorectified bounds region, of the form
`(min row, max row, min column, max column)`.
row_array : numpy.ndarray
The rows of the pixel array. Must be one-dimensional, monotonically
increasing, and have `row_array.size = value_array.shape[0]`.
col_array : numpy.ndarray
The columns of the pixel array. Must be one-dimensional, monotonically
increasing, and have `col_array.size = value_array.shape[1]`.
value_array : numpy.ndarray
The values array. If this has complex dtype and `complex_valued=False`,
then the :func:`numpy.abs` will be applied.
Returns
-------
(numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)
"""
value_array = self._validate_row_col_values(row_array, col_array, value_array, value_is_flat=True)
# set up the results workspace
ortho_array = self._initialize_workspace(ortho_bounds)
# determine the pixel coordinates for the ortho coordinates meshgrid
ortho_mesh = self._get_ortho_mesh(ortho_bounds)
# determine the nearest neighbor pixel coordinates
pixel_mesh = self.proj_helper.ortho_to_pixel(ortho_mesh)
pixel_rows = pixel_mesh[:, :, 0]
pixel_cols = pixel_mesh[:, :, 1]
return value_array, pixel_rows, pixel_cols, ortho_array
    def _get_orthrectified_from_array_flat(self, ortho_bounds, row_array, col_array, value_array):
        """
        Construct the orthorectified array covering the orthorectified region given by
        `ortho_bounds` based on the two-dimensional `value_array`, which spans the
        pixel region defined by `row_array` and `col_array`.

        This is the abstract interpolation hook - concrete subclasses (nearest
        neighbor, spline based, etc.) supply the implementation.

        Parameters
        ----------
        ortho_bounds : numpy.ndarray
            Determines the orthorectified bounds region, of the form
            `(min row, max row, min column, max column)`.
        row_array : numpy.ndarray
            The rows of the pixel array. Must be one-dimensional, monotonically
            increasing, and have `row_array.size = value_array.shape[0]`.
        col_array : numpy.ndarray
            The columns of the pixel array. Must be one-dimensional, monotonically
            increasing, and have `col_array.size = value_array.shape[1]`.
        value_array : numpy.ndarray
            The values array. If this has complex dtype and `complex_valued=False`,
            then the :func:`numpy.abs` will be applied.

        Returns
        -------
        numpy.ndarray
        """

        raise NotImplementedError
class NearestNeighborMethod(OrthorectificationHelper):
    """
    Nearest neighbor ortho-rectification method.

    .. warning::
        Modification of the proj_helper parameters when the default full image
        bounds have been defined (i.e. sicd.RadarCollection.Area is defined) may
        result in unintended results.
    """

    def __init__(self, reader, index=0, proj_helper=None, complex_valued=False,
                 pad_value=None, apply_radiometric=None, subtract_radiometric_noise=False):
        """

        Parameters
        ----------
        reader : SICDTypeReader
        index : int
        proj_helper : None|ProjectionHelper
            If `None`, this will default to `PGRatPolyProjection(<sicd>)` unless there is
            a SarpyRatPolyError, when it will fall back to `PGProjection(<sicd>)`,
            where `<sicd>` will be the sicd from `reader` at `index`. Otherwise,
            it is the user's responsibility to ensure that `reader`, `index` and
            `proj_helper` are in sync.
        complex_valued : bool
            Do we want complex values returned? If `False`, the magnitude values
            will be used.
        pad_value : None|Any
            Value to use for any out-of-range pixels. Defaults to `0` if not provided.
        apply_radiometric : None|str
            **Only valid if `complex_valued=False`**. If provided, must be one of
            `['RCS', 'Sigma0', 'Gamma0', 'Beta0']` (not case-sensitive). This will
            apply the given radiometric scale factor to the array values.
        subtract_radiometric_noise : bool
            **Only has any effect if `apply_radiometric` is provided.** This indicates that
            the radiometric noise should be subtracted prior to applying the given
            radiometric scale factor.
        """

        # all behavior is provided by the parent class; this subclass only
        # supplies the nearest-neighbor interpolation hook below
        super(NearestNeighborMethod, self).__init__(
            reader, index=index, proj_helper=proj_helper, complex_valued=complex_valued,
            pad_value=pad_value, apply_radiometric=apply_radiometric,
            subtract_radiometric_noise=subtract_radiometric_noise)

    def _get_orthrectified_from_array_flat(self, ortho_bounds, row_array, col_array, value_array):
        # setup the result workspace, mapping the ortho grid back to pixel coordinates
        value_array, pixel_rows, pixel_cols, ortho_array = self._setup_flat_workspace(
            ortho_bounds, row_array, col_array, value_array)
        # potentially apply the radiometric parameters to the value array
        value_array = self._apply_radiometric_params(row_array, col_array, value_array)
        if value_array.size > 0:
            # determine the in bounds points
            mask = self._get_mask(pixel_rows, pixel_cols, row_array, col_array)
            # determine the nearest neighbors for our row/column indices
            # NOTE(review): numpy.digitize returns the index i such that
            # bins[i-1] <= x < bins[i] (the upper bin edge for increasing bins) -
            # confirm this selects the intended neighbor rather than requiring a
            # -1 offset or rounding to the nearest sample.
            row_inds = numpy.digitize(pixel_rows[mask], row_array)
            col_inds = numpy.digitize(pixel_cols[mask], col_array)
            ortho_array[mask] = value_array[row_inds, col_inds]
        return ortho_array
class BivariateSplineMethod(OrthorectificationHelper):
    """
    Bivariate spline interpolation ortho-rectification method.

    .. warning::
        Modification of the proj_helper parameters when the default full image
        bounds have been defined (i.e. sicd.RadarCollection.Area is defined) may
        result in unintended results.
    """

    # spline degrees along the row/column axes
    __slots__ = ('_row_order', '_col_order')

    def __init__(self, reader, index=0, proj_helper=None, complex_valued=False,
                 pad_value=None, apply_radiometric=None, subtract_radiometric_noise=False,
                 row_order=1, col_order=1):
        """

        Parameters
        ----------
        reader : SICDTypeReader
        index : int
        proj_helper : None|ProjectionHelper
            If `None`, this will default to `PGRatPolyProjection(<sicd>)` unless there is
            a SarpyRatPolyError, when it will fall back to `PGProjection(<sicd>)`,
            where `<sicd>` will be the sicd from `reader` at `index`. Otherwise,
            it is the user's responsibility to ensure that `reader`, `index` and
            `proj_helper` are in sync.
        complex_valued : bool
            Do we want complex values returned? If `False`, the magnitude values
            will be used.
        pad_value : None|Any
            Value to use for any out-of-range pixels. Defaults to `0` if not provided.
        apply_radiometric : None|str
            **Only valid if `complex_valued=False`**. If provided, must be one of
            `['RCS', 'Sigma0', 'Gamma0', 'Beta0']` (not case-sensitive). This will
            apply the given radiometric scale factor to the array values.
        subtract_radiometric_noise : bool
            **Only has any effect if `apply_radiometric` is provided.** This indicates that
            the radiometric noise should be subtracted prior to applying the given
            radiometric scale factor.
        row_order : int
            The row degree for the spline.
        col_order : int
            The column degree for the spline.
        """

        self._row_order = None
        self._col_order = None
        if complex_valued:
            # RectBivariateSpline does not handle complex data
            raise ValueError('BivariateSpline only supports real valued results for now.')
        super(BivariateSplineMethod, self).__init__(
            reader, index=index, proj_helper=proj_helper, complex_valued=complex_valued,
            pad_value=pad_value, apply_radiometric=apply_radiometric,
            subtract_radiometric_noise=subtract_radiometric_noise)
        # set through the validating properties below
        self.row_order = row_order
        self.col_order = col_order

    @property
    def row_order(self):
        """
        int : The spline order for the x/row coordinate, where `1 <= row_order <= 5`.
        """

        return self._row_order

    @row_order.setter
    def row_order(self, value):
        value = int(value)
        if not (1 <= value <= 5):
            raise ValueError('row_order must take value between 1 and 5.')
        self._row_order = value

    @property
    def col_order(self):
        """
        int : The spline order for the y/col coordinate, where `1 <= col_order <= 5`.
        """

        return self._col_order

    @col_order.setter
    def col_order(self, value):
        value = int(value)
        if not (1 <= value <= 5):
            raise ValueError('col_order must take value between 1 and 5.')
        self._col_order = value

    def _get_orthrectified_from_array_flat(self, ortho_bounds, row_array, col_array, value_array):
        # setup the result workspace, mapping the ortho grid back to pixel coordinates
        value_array, pixel_rows, pixel_cols, ortho_array = self._setup_flat_workspace(
            ortho_bounds, row_array, col_array, value_array)
        # potentially apply the radiometric parameters before fitting the spline
        value_array = self._apply_radiometric_params(row_array, col_array, value_array)
        if value_array.size > 0:
            # set up our spline (s=0 yields an interpolating, not smoothing, spline)
            sp = RectBivariateSpline(row_array, col_array, value_array, kx=self.row_order, ky=self.col_order, s=0)
            # determine the in bounds points
            mask = self._get_mask(pixel_rows, pixel_cols, row_array, col_array)
            # evaluate the spline at the in-bounds pixel coordinates
            result = sp.ev(pixel_rows[mask], pixel_cols[mask])
            ortho_array[mask] = result
        return ortho_array
"""
Methods for creating a variety of SIDD products.
Examples
--------
Create a variety of sidd products.
.. code-block:: python
import os
from sarpy.io.complex.converter import open_complex
from sarpy.processing.ortho_rectify import BivariateSplineMethod, NearestNeighborMethod
from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_csi_sidd, create_dynamic_image_sidd
# open a sicd type file
reader = open_complex('<sicd type object file name>')
# create an orthorectification helper for specified sicd index
ortho_helper = NearestNeighborMethod(reader, index=0)
# create a sidd version 2 detected image for the whole file
create_detected_image_sidd(ortho_helper, '<output directory>', block_size=10, version=2)
# create a sidd version 2 color sub-aperture image for the whole file
create_csi_sidd(ortho_helper, '<output directory>', dimension=0, version=2)
# create a sidd version 2 dynamic image/sub-aperture stack for the whole file
create_dynamic_image_sidd(ortho_helper, '<output directory>', dimension=0, version=2)
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import os
from sarpy.processing.ortho_rectify.base import FullResolutionFetcher, OrthorectificationIterator
from sarpy.processing.ortho_rectify.ortho_methods import OrthorectificationHelper
from sarpy.processing.sidd.sidd_structure_creation import create_sidd_structure
from sarpy.processing.sicd.csi import CSICalculator
from sarpy.processing.sicd.subaperture import SubapertureCalculator, SubapertureOrthoIterator
from sarpy.io.product.sidd import SIDDWriter
from sarpy.io.general.base import SarpyIOError
from sarpy.visualization.remap import MonochromaticRemap, NRL
# TODO: move this to processing for 1.3.0

# default monochromatic remap classes applied when the caller supplies none
DEFAULT_IMG_REMAP = NRL  # detected image products
DEFAULT_CSI_REMAP = NRL  # color sub-aperture image products
DEFAULT_DI_REMAP = NRL  # dynamic image products

# shared fragments for validation error messages
_output_text = 'output_directory `{}`\n\t' \
               'does not exist or is not a directory'
_orthohelper_text = 'ortho_helper is required to be an instance of OrthorectificationHelper,\n\t' \
                    'got type `{}`'
def _validate_filename(output_directory, output_file, sidd_structure):
"""
Validate the output filename.
Parameters
----------
output_directory : str
output_file : None|str
sidd_structure
Returns
-------
str
"""
if output_file is None:
# noinspection PyProtectedMember
fstem = os.path.split(sidd_structure.NITF['SUGGESTED_NAME']+'.nitf')[1]
else:
fstem = os.path.split(output_file)[1]
full_filename = os.path.join(os.path.expanduser(output_directory), fstem)
if os.path.exists(full_filename):
raise SarpyIOError('File {} already exists.'.format(full_filename))
return full_filename
def _validate_remap_function(remap_function):
    """
    Check that the supplied remap is usable for SIDD production - it must be a
    monochromatic remap yielding 8 or 16 bit output.

    Parameters
    ----------
    remap_function : MonochromaticRemap

    Raises
    ------
    TypeError
        If the remap is not a MonochromaticRemap, or its bit depth is not 8 or 16.
    """

    if not isinstance(remap_function, MonochromaticRemap):
        raise TypeError('remap_function must be an instance of MonochromaticRemap')
    if remap_function.bit_depth not in (8, 16):
        raise TypeError('remap_function usage for SIDD requires 8 or 16 bit output')
def create_detected_image_sidd(
        ortho_helper, output_directory, output_file=None, block_size=10, dimension=0,
        bounds=None, version=3, include_sicd=True, remap_function=None):
    """
    Produce a basic detected-image SIDD product from a SICD type reader.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
        The ortho-rectification helper object.
    output_directory : str
        The output directory for the given file.
    output_file : None|str
        The file name, this will default to a sensible value.
    block_size : int
        The approximate processing block size to fetch, given in MB. The
        minimum value for use here will be 1.
    dimension : int
        Which dimension to split over in block processing? Must be either 0 or 1.
    bounds : None|numpy.ndarray|list|tuple
        The sicd pixel bounds of the form `(min row, max row, min col, max col)`.
        This will default to the full image.
    version : int
        The SIDD version to use, must be one of 1, 2, or 3.
    include_sicd : bool
        Include the SICD structure in the SIDD file?
    remap_function : None|MonochromaticRemap
        The applied remap function. If one is not provided, then a default is
        used. Required global parameters will be calculated if they are missing,
        so the internal state of this remap function may be modified.

    Returns
    -------
    None

    Raises
    ------
    SarpyIOError
        If the output directory is invalid, or the target file already exists.
    TypeError
        If ortho_helper or remap_function has the wrong type.
    """

    if not os.path.isdir(output_directory):
        raise SarpyIOError(_output_text.format(output_directory))
    if not isinstance(ortho_helper, OrthorectificationHelper):
        raise TypeError(_orthohelper_text.format(type(ortho_helper)))

    if remap_function is None:
        remap_function = DEFAULT_IMG_REMAP(override_name='IMG_DEFAULT')
    _validate_remap_function(remap_function)

    # a plain full-resolution data fetcher drives the ortho-rectification iterator
    fetcher = FullResolutionFetcher(
        ortho_helper.reader, dimension=dimension, index=ortho_helper.index, block_size=block_size)
    data_iterator = OrthorectificationIterator(
        ortho_helper, calculator=fetcher, bounds=bounds,
        remap_function=remap_function, recalc_remap_globals=False)

    # assemble the SIDD metadata structure for the rectified footprint
    sidd_structure = create_sidd_structure(
        ortho_helper, data_iterator.ortho_bounds,
        product_class='Detected Image', pixel_type='MONO{}I'.format(remap_function.bit_depth), version=version)
    sidd_structure.NITF['SUGGESTED_NAME'] = ortho_helper.sicd.get_suggested_name(ortho_helper.index)+'_IMG'

    # stream the orthorectified blocks into the product file
    full_filename = _validate_filename(output_directory, output_file, sidd_structure)
    with SIDDWriter(full_filename, sidd_structure, ortho_helper.sicd if include_sicd else None) as writer:
        for data, start_indices in data_iterator:
            writer(data, start_indices=start_indices, index=0)
def create_csi_sidd(
        ortho_helper, output_directory, output_file=None, dimension=0,
        block_size=30, bounds=None, version=3, include_sicd=True, remap_function=None):
    """
    Produce a Color Sub-Aperture Image (CSI) SIDD product from a SICD type reader.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
        The ortho-rectification helper object.
    output_directory : str
        The output directory for the given file.
    output_file : None|str
        The file name, this will default to a sensible value.
    dimension : int
        The dimension over which to split the sub-aperture.
    block_size : int
        The approximate processing block size to fetch, given in MB. The
        minimum value for use here will be 1.
    bounds : None|numpy.ndarray|list|tuple
        The sicd pixel bounds of the form `(min row, max row, min col, max col)`.
        This will default to the full image.
    version : int
        The SIDD version to use, must be one of 1, 2, or 3.
    include_sicd : bool
        Include the SICD structure in the SIDD file?
    remap_function : None|MonochromaticRemap
        The applied remap function, which must explicitly be 8-bit for CSI. If
        one is not provided, then a default is used. Required global parameters
        will be calculated if they are missing, so the internal state of this
        remap function may be modified.

    Returns
    -------
    None

    Raises
    ------
    SarpyIOError
        If the output directory is invalid, or the target file already exists.
    ValueError
        If the remap function is not 8-bit.
    """

    if not os.path.isdir(output_directory):
        raise SarpyIOError(_output_text.format(output_directory))
    if not isinstance(ortho_helper, OrthorectificationHelper):
        raise TypeError(_orthohelper_text.format(type(ortho_helper)))

    # the CSI calculator supplies the color sub-aperture data blocks
    calculator = CSICalculator(
        ortho_helper.reader, dimension=dimension, index=ortho_helper.index, block_size=block_size)

    if remap_function is None:
        remap_function = DEFAULT_CSI_REMAP(override_name='CSI_DEFAULT', bit_depth=8)
    _validate_remap_function(remap_function)
    if remap_function.bit_depth != 8:
        # the RGB24I pixel type below requires exactly 8 bits per band
        raise ValueError('The CSI SIDD specifically requires an 8-bit remap function.')

    iterator = OrthorectificationIterator(
        ortho_helper, calculator=calculator, bounds=bounds,
        remap_function=remap_function, recalc_remap_globals=False)

    # assemble the SIDD metadata structure for the rectified footprint
    sidd_structure = create_sidd_structure(
        ortho_helper, iterator.ortho_bounds,
        product_class='Color Subaperture Image', pixel_type='RGB24I', version=version)
    sidd_structure.NITF['SUGGESTED_NAME'] = calculator.sicd.get_suggested_name(calculator.index)+'_CSI'

    # stream the processed blocks into the product file
    full_filename = _validate_filename(output_directory, output_file, sidd_structure)
    with SIDDWriter(full_filename, sidd_structure, calculator.sicd if include_sicd else None) as writer:
        for data, start_indices in iterator:
            writer(data, start_indices=start_indices, index=0)
def create_dynamic_image_sidd(
        ortho_helper, output_directory, output_file=None, dimension=0, block_size=10,
        bounds=None, frame_count=9, aperture_fraction=0.2, method='FULL', version=3,
        include_sicd=True, remap_function=None):
    """
    Create a SIDD version of a Dynamic Image (Sub-Aperture Stack) from a SICD
    type reader. One SIDD structure is written per sub-aperture frame.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
        The ortho-rectification helper object.
    output_directory : str
        The output directory for the given file.
    output_file : None|str
        The file name, this will default to a sensible value.
    dimension : int
        The dimension over which to split the sub-aperture.
    block_size : int
        The approximate processing block size to fetch, given in MB. The
        minimum value for use here will be 1.
    bounds : None|numpy.ndarray|list|tuple
        The sicd pixel bounds of the form `(min row, max row, min col, max col)`.
        This will default to the full image.
    frame_count : int
        The number of frames to calculate.
    aperture_fraction : float
        The relative size of each aperture window.
    method : str
        The subaperture processing method, which must be one of
        `('NORMAL', 'FULL', 'MINIMAL')`.
    version : int
        The SIDD version to use, must be one of 1, 2, or 3.
    include_sicd : bool
        Include the SICD structure in the SIDD file?
    remap_function : None|MonochromaticRemap
        The applied remap function. If one is not provided, then a default is
        used. Required global parameters will be calculated if they are missing,
        so the internal state of this remap function may be modified.

    Returns
    -------
    None
    """
    if not os.path.isdir(output_directory):
        raise SarpyIOError(_output_text.format(output_directory))
    if not isinstance(ortho_helper, OrthorectificationHelper):
        raise TypeError(_orthohelper_text.format(type(ortho_helper)))
    # construct the subaperture calculator class, which produces the frame stack
    subap_calculator = SubapertureCalculator(
        ortho_helper.reader, dimension=dimension, index=ortho_helper.index, block_size=block_size,
        frame_count=frame_count, aperture_fraction=aperture_fraction, method=method)
    if remap_function is None:
        remap_function = DEFAULT_DI_REMAP(override_name='DI_DEFAULT')
    _validate_remap_function(remap_function)
    # construct the ortho-rectification iterator - depth_first yields all frames
    # for one block before advancing to the next block
    ortho_iterator = SubapertureOrthoIterator(
        ortho_helper, calculator=subap_calculator, bounds=bounds,
        remap_function=remap_function, recalc_remap_globals=False, depth_first=True)
    # create the template sidd structure covering the rectified footprint
    ortho_bounds = ortho_iterator.ortho_bounds
    sidd_structure = create_sidd_structure(
        ortho_helper, ortho_bounds,
        product_class='Dynamic Image', pixel_type='MONO{}I'.format(remap_function.bit_depth), version=version)
    # set suggested name
    sidd_structure.NITF['SUGGESTED_NAME'] = subap_calculator.sicd.get_suggested_name(subap_calculator.index)+'__DI'
    # one copy of the structure per frame, labeled by frame number
    the_sidds = []
    for i in range(subap_calculator.frame_count):
        this_sidd = sidd_structure.copy()
        this_sidd.ProductCreation.ProductType = 'Frame {}'.format(i+1)
        the_sidds.append(this_sidd)
    # resolve the output file name
    # NOTE(review): this duplicates _validate_filename with slightly different
    # behavior - output_directory is not expanduser'd in the join, the existence
    # check is done on an expanded copy while the unexpanded path is handed to
    # SIDDWriter, and any directory portion of output_file is preserved rather
    # than stripped. Confirm whether these differences are intentional.
    if output_file is None:
        # noinspection PyProtectedMember
        full_filename = os.path.join(output_directory, sidd_structure.NITF['SUGGESTED_NAME']+'.nitf')
    else:
        full_filename = os.path.join(output_directory, output_file)
    if os.path.exists(os.path.expanduser(full_filename)):
        raise SarpyIOError('File {} already exists.'.format(full_filename))
    with SIDDWriter(full_filename, the_sidds, subap_calculator.sicd if include_sicd else None) as writer:
        # iterate and write each block into its frame's image segment
        for data, start_indices, the_frame in ortho_iterator:
            writer(data, start_indices=start_indices, index=the_frame)
| 15,252 | 39.566489 | 130 | py |
sarpy | sarpy-master/sarpy/processing/sidd/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/processing/sidd/sidd_structure_creation.py | """
Common functionality for creating the SIDD structure from a SICD structure and
OrthorectificationHelper.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import numpy
from sarpy.io.complex.utils import two_dim_poly_fit, get_im_physical_coords
from sarpy.processing.ortho_rectify import OrthorectificationHelper, ProjectionHelper, \
PGProjection
# agnostic to versions 1 & 2
from sarpy.io.product.sidd2_elements.Measurement import PlaneProjectionType, ProductPlaneType
from sarpy.io.product.sidd2_elements.blocks import ReferencePointType, Poly2DType, XYZPolyType, \
FilterType, FilterBankType, PredefinedFilterType, NewLookupTableType, PredefinedLookupType
# version 3 elements
from sarpy.io.product.sidd3_elements.SIDD import SIDDType as SIDDType3
from sarpy.io.product.sidd3_elements.Display import ProductDisplayType as ProductDisplayType3, \
NonInteractiveProcessingType as NonInteractiveProcessingType3, \
ProductGenerationOptionsType as ProductGenerationOptionsType3, RRDSType as RRDSType3, \
InteractiveProcessingType as InteractiveProcessingType3, GeometricTransformType as GeometricTransformType3, \
SharpnessEnhancementType as SharpnessEnhancementType3, DynamicRangeAdjustmentType as DynamicRangeAdjustmentType3, \
ScalingType as ScalingType3, OrientationType as OrientationType3
from sarpy.io.product.sidd3_elements.GeoData import GeoDataType as GeoDataType3
from sarpy.io.product.sidd3_elements.Measurement import MeasurementType as MeasurementType3, \
PlaneProjectionType as PlaneProjectionType3, ProductPlaneType as ProductPlaneType3
from sarpy.io.product.sidd3_elements.ExploitationFeatures import ExploitationFeaturesType as ExploitationFeaturesType3
from sarpy.io.product.sidd3_elements.ProductCreation import ProductCreationType as ProductCreationType3
from sarpy.io.product.sidd3_elements.blocks import ReferencePointType as ReferencePointType3, \
Poly2DType as Poly2DType3, XYZPolyType as XYZPolyType3, FilterType as FilterType3, \
FilterBankType as FilterBankType3, PredefinedFilterType as PredefinedFilterType3, \
NewLookupTableType as NewLookupTableType3, PredefinedLookupType as PredefinedLookupType3
# version 2 elements
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd2_elements.Display import ProductDisplayType as ProductDisplayType2, \
NonInteractiveProcessingType as NonInteractiveProcessingType2, \
ProductGenerationOptionsType as ProductGenerationOptionsType2, RRDSType as RRDSType2, \
InteractiveProcessingType as InteractiveProcessingType2, GeometricTransformType as GeometricTransformType2, \
SharpnessEnhancementType as SharpnessEnhancementType2, DynamicRangeAdjustmentType as DynamicRangeAdjustmentType2, \
ScalingType as ScalingType2, OrientationType as OrientationType2
from sarpy.io.product.sidd2_elements.GeoData import GeoDataType as GeoDataType2
from sarpy.io.product.sidd2_elements.Measurement import MeasurementType as MeasurementType2
from sarpy.io.product.sidd2_elements.ExploitationFeatures import ExploitationFeaturesType as ExploitationFeaturesType2
from sarpy.io.product.sidd2_elements.ProductCreation import ProductCreationType as ProductCreationType2
# version 1 elements
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd1_elements.Display import ProductDisplayType as ProductDisplayType1
from sarpy.io.product.sidd1_elements.GeographicAndTarget import GeographicAndTargetType as GeographicAndTargetType1, \
GeographicCoverageType as GeographicCoverageType1, GeographicInformationType as GeographicInformationType1
from sarpy.io.product.sidd1_elements.Measurement import MeasurementType as MeasurementType1
from sarpy.io.product.sidd1_elements.ExploitationFeatures import ExploitationFeaturesType as ExploitationFeaturesType1
from sarpy.io.product.sidd1_elements.ProductCreation import ProductCreationType as ProductCreationType1
logger = logging.getLogger(__name__)
# error-message template shared by the unhandled-projection-helper branches below
_proj_helper_text = 'Unhandled projection helper type `{}`'
# TODO: move this to processing for 1.3.0
def _fit_timecoa_poly(proj_helper, bounds):
    """
    Re-fit the SICD TimeCOAPoly as a polynomial in the (bounds-relative)
    ortho-rectified pixel coordinate system.

    Parameters
    ----------
    proj_helper : ProjectionHelper
    bounds : numpy.ndarray
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.

    Returns
    -------
    Poly2DType
    """

    source_poly = proj_helper.sicd.Grid.TimeCOAPoly
    fit_order = max(source_poly.order1, source_poly.order2)
    if fit_order == 0:
        # a constant polynomial (spotlight collect) - carry the coefficients over
        return Poly2DType(Coefs=source_poly.get_array())

    # sample an ortho-pixel grid somewhat denser than the polynomial order
    grid_samples = fit_order + 10
    row_samples = numpy.linspace(bounds[0], bounds[1], num=grid_samples)
    col_samples = numpy.linspace(bounds[2], bounds[3], num=grid_samples)
    col_mesh, row_mesh = numpy.meshgrid(col_samples, row_samples)
    ortho_grid = numpy.stack((row_mesh, col_mesh), axis=2)

    # project the ortho grid back to image pixels, then to physical image coordinates
    pixel_grid = proj_helper.ortho_to_pixel(ortho_grid)
    pixel_rows_m = get_im_physical_coords(
        pixel_grid[:, :, 0], proj_helper.sicd.Grid, proj_helper.sicd.ImageData, 'row')
    pixel_cols_m = get_im_physical_coords(
        pixel_grid[:, :, 1], proj_helper.sicd.Grid, proj_helper.sicd.ImageData, 'col')

    # evaluate the sicd polynomial at those physical coordinates, then re-fit
    # against the bounds-relative ortho pixel coordinates
    timecoa_values = proj_helper.sicd.Grid.TimeCOAPoly(pixel_rows_m, pixel_cols_m)
    sidd_timecoa_coeffs, residuals, rank, sing_values = two_dim_poly_fit(
        ortho_grid[:, :, 0] - bounds[0],
        ortho_grid[:, :, 1] - bounds[2], timecoa_values,
        x_order=fit_order, y_order=fit_order, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
    logger.warning(
        'The time_coa_fit details:\n\t'
        'root mean square residuals = {}\n\t'
        'rank = {}\n\t'
        'singular values = {}'.format(residuals, rank, sing_values))
    return Poly2DType(Coefs=sidd_timecoa_coeffs)
def _create_plane_projection(proj_helper, bounds):
    """
    Assemble the PlaneProjection structure, common to SIDD versions 1 & 2.

    Parameters
    ----------
    proj_helper : PGProjection
    bounds : numpy.ndarray
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.

    Returns
    -------
    PlaneProjectionType
    """

    ref_pixels = proj_helper.reference_pixels
    # reference point expressed in bounds-relative pixel coordinates
    reference_point = ReferencePointType(
        ECEF=proj_helper.reference_point,
        Point=(float(ref_pixels[0]-bounds[0]), float(ref_pixels[1]-bounds[2])))
    product_plane = ProductPlaneType(
        RowUnitVector=proj_helper.row_vector,
        ColUnitVector=proj_helper.col_vector)
    return PlaneProjectionType(
        ReferencePoint=reference_point,
        SampleSpacing=(proj_helper.row_spacing, proj_helper.col_spacing),
        TimeCOAPoly=_fit_timecoa_poly(proj_helper, bounds),
        ProductPlane=product_plane)
def _fit_timecoa_poly_v3(proj_helper, bounds):
    """
    Fit the TimeCOA polynomial in the (bounds-relative) ortho-rectified pixel
    coordinates, producing the SIDD version 3 element type.

    Parameters
    ----------
    proj_helper : ProjectionHelper
    bounds : numpy.ndarray
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.

    Returns
    -------
    Poly2DType3
    """
    # NOTE(review): this duplicates _fit_timecoa_poly, differing only in the
    # returned element type (Poly2DType3 instead of Poly2DType).
    # what is the order of the sicd timecoapoly?
    in_poly = proj_helper.sicd.Grid.TimeCOAPoly
    use_order = max(in_poly.order1, in_poly.order2)
    if use_order == 0:
        # this is a constant polynomial, must be a spotlight collect
        return Poly2DType3(Coefs=in_poly.get_array())
    # create an ortho coordinate grid, sampled denser than the polynomial order
    samples = use_order+10
    ortho_grid = numpy.zeros((samples, samples, 2), dtype=numpy.float64)
    ortho_grid[:, :, 1], ortho_grid[:, :, 0] = numpy.meshgrid(
        numpy.linspace(bounds[2], bounds[3], num=samples),
        numpy.linspace(bounds[0], bounds[1], num=samples))
    # map to pixel grid coordinates, then to physical image coordinates
    pixel_grid = proj_helper.ortho_to_pixel(ortho_grid)
    pixel_rows_m = get_im_physical_coords(
        pixel_grid[:, :, 0], proj_helper.sicd.Grid, proj_helper.sicd.ImageData, 'row')
    pixel_cols_m = get_im_physical_coords(
        pixel_grid[:, :, 1], proj_helper.sicd.Grid, proj_helper.sicd.ImageData, 'col')
    # evaluate the sicd timecoapoly at the physical coordinates
    timecoa_values = proj_helper.sicd.Grid.TimeCOAPoly(pixel_rows_m, pixel_cols_m)
    # re-fit against the bounds-relative ortho_grid coordinates
    sidd_timecoa_coeffs, residuals, rank, sing_values = two_dim_poly_fit(
        ortho_grid[:, :, 0] - bounds[0],
        ortho_grid[:, :, 1] - bounds[2], timecoa_values,
        x_order=use_order, y_order=use_order, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
    logger.warning(
        'The time_coa_fit details:\n\t'
        'root mean square residuals = {}\n\t'
        'rank = {}\n\t'
        'singular values = {}'.format(residuals, rank, sing_values))
    return Poly2DType3(Coefs=sidd_timecoa_coeffs)
def _create_plane_projection_v3(proj_helper, bounds):
    """
    Construct the PlaneProjection structure using the SIDD version 3 element
    types.

    Parameters
    ----------
    proj_helper : PGProjection
    bounds : numpy.ndarray
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.

    Returns
    -------
    PlaneProjectionType3
    """
    ref_pixels = proj_helper.reference_pixels
    # the reference Point is expressed in bounds-relative pixel coordinates
    return PlaneProjectionType3(
        ReferencePoint=ReferencePointType3(ECEF=proj_helper.reference_point,
                                           Point=(float(ref_pixels[0]-bounds[0]),
                                                  float(ref_pixels[1]-bounds[2]))),
        SampleSpacing=(proj_helper.row_spacing, proj_helper.col_spacing),
        TimeCOAPoly=_fit_timecoa_poly_v3(proj_helper, bounds),
        ProductPlane=ProductPlaneType3(RowUnitVector=proj_helper.row_vector,
                                       ColUnitVector=proj_helper.col_vector))
#########################
# Version 3 element creation
def create_sidd_structure_v3(ortho_helper, bounds, product_class, pixel_type):
    """
    Create a SIDD version 3.0 structure based on the orthorectification helper
    and pixel bounds.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
    bounds : numpy.ndarray|list|tuple
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.
    product_class : str
        A descriptive name for the product class. Examples -
        :code:`Dynamic Image, Amplitude Change Detection, Coherent Change Detection`
    pixel_type : str
        Must be one of `MONO8I, MONO16I` or `RGB24I`.

    Returns
    -------
    SIDDType3

    Raises
    ------
    ValueError
        If pixel_type is unrecognized, or the projection helper is not a
        PGProjection.
    """
    def _create_display_v3():
        # band count follows directly from the pixel type
        if pixel_type in ('MONO8I', 'MONO16I'):
            bands = 1
        elif pixel_type == 'RGB24I':
            bands = 3
        else:
            raise ValueError('pixel_type must be one of MONO8I, MONO16I, RGB24I. Got {}'.format(pixel_type))
        # one processing entry per band, all using the same fixed defaults
        return ProductDisplayType3(
            PixelType=pixel_type,
            NumBands=bands,
            NonInteractiveProcessing=[NonInteractiveProcessingType3(
                ProductGenerationOptions=ProductGenerationOptionsType3(
                    DataRemapping=NewLookupTableType3(
                        LUTName='DENSITY',
                        Predefined=PredefinedLookupType3(
                            DatabaseName='DENSITY'))),
                RRDS=RRDSType3(DownsamplingMethod='DECIMATE'),
                band=i+1) for i in range(bands)],
            InteractiveProcessing=[InteractiveProcessingType3(
                GeometricTransform=GeometricTransformType3(
                    Scaling=ScalingType3(
                        AntiAlias=FilterType3(
                            FilterName='AntiAlias',
                            FilterBank=FilterBankType3(
                                Predefined=PredefinedFilterType3(DatabaseName='BILINEAR')),
                            Operation='CONVOLUTION'),
                        Interpolation=FilterType3(
                            FilterName='Interpolation',
                            FilterBank=FilterBankType3(
                                Predefined=PredefinedFilterType3(DatabaseName='BILINEAR')),
                            Operation='CONVOLUTION')),
                    Orientation=OrientationType3(ShadowDirection='ARBITRARY')),
                SharpnessEnhancement=SharpnessEnhancementType3(
                    ModularTransferFunctionEnhancement=FilterType3(
                        FilterName='ModularTransferFunctionEnhancement',
                        FilterBank=FilterBankType3(
                            Predefined=PredefinedFilterType3(DatabaseName='BILINEAR')),
                        Operation='CONVOLUTION')),
                DynamicRangeAdjustment=DynamicRangeAdjustmentType3(
                    AlgorithmType='NONE',
                    BandStatsSource=1),
                band=i+1) for i in range(bands)])
    def _create_measurement_v3():
        # NOTE(review): returns None for a non-PGProjection helper, while
        # _create_exploitation_v3 raises for the same condition - confirm this
        # asymmetry is intentional.
        proj_helper = ortho_helper.proj_helper
        rows = bounds[1] - bounds[0]
        cols = bounds[3] - bounds[2]
        if isinstance(proj_helper, PGProjection):
            # fit the time coa polynomial in ortho-pixel coordinates
            plane_projection = _create_plane_projection_v3(proj_helper, bounds)
            return MeasurementType3(PixelFootprint=(rows, cols),
                                    ValidData=((0, 0), (0, cols), (rows, cols), (rows, 0)),
                                    PlaneProjection=plane_projection,
                                    ARPPoly=XYZPolyType3(
                                        X=proj_helper.sicd.Position.ARPPoly.X.get_array(),
                                        Y=proj_helper.sicd.Position.ARPPoly.Y.get_array(),
                                        Z=proj_helper.sicd.Position.ARPPoly.Z.get_array()))
        else:
            return None
    def _create_exploitation_v3():
        # exploitation features derived directly from the sicd structure
        proj_helper = ortho_helper.proj_helper
        if isinstance(proj_helper, PGProjection):
            return ExploitationFeaturesType3.from_sicd(
                proj_helper.sicd, proj_helper.row_vector, proj_helper.col_vector)
        else:
            raise ValueError(_proj_helper_text.format(type(proj_helper)))
    pixel_type = pixel_type.upper()
    # validate bounds and get pixel coordinates rectangle
    bounds, ortho_pixel_corners = ortho_helper.bounds_to_rectangle(bounds)
    # construct appropriate SIDD elements
    prod_create = ProductCreationType3.from_sicd(ortho_helper.proj_helper.sicd, product_class)
    prod_create.Classification.ISMCATCESVersion = '201903'
    prod_create.Classification.compliesWith = 'USGov'
    # Display requires more product specifics
    display = _create_display_v3()
    # GeoData - image corners in lat/lon from the ortho pixel rectangle
    llh_corners = ortho_helper.proj_helper.ortho_to_llh(ortho_pixel_corners)
    geo_data = GeoDataType3(ImageCorners=llh_corners[:, :2], ValidData=llh_corners[:, :2])
    # Measurement
    measurement = _create_measurement_v3()
    # ExploitationFeatures
    exploit_feats = _create_exploitation_v3()
    return SIDDType3(ProductCreation=prod_create,
                     GeoData=geo_data,
                     Display=display,
                     Measurement=measurement,
                     ExploitationFeatures=exploit_feats)
#########################
# Version 2 element creation
def create_sidd_structure_v2(ortho_helper, bounds, product_class, pixel_type):
    """
    Create a SIDD version 2.0 structure based on the orthorectification helper
    and pixel bounds.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
    bounds : numpy.ndarray|list|tuple
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.
    product_class : str
        A descriptive name for the product class. Examples -
        :code:`Dynamic Image, Amplitude Change Detection, Coherent Change Detection`
    pixel_type : str
        Must be one of `MONO8I, MONO16I` or `RGB24I`.

    Returns
    -------
    SIDDType2

    Raises
    ------
    ValueError
        If pixel_type is unrecognized, or the projection helper is not a
        PGProjection.
    """
    def _create_display_v2():
        # band count follows directly from the pixel type
        if pixel_type in ('MONO8I', 'MONO16I'):
            bands = 1
        elif pixel_type == 'RGB24I':
            bands = 3
        else:
            raise ValueError('pixel_type must be one of MONO8I, MONO16I, RGB24I. Got {}'.format(pixel_type))
        # one processing entry per band, all using the same fixed defaults
        return ProductDisplayType2(
            PixelType=pixel_type,
            NumBands=bands,
            NonInteractiveProcessing=[NonInteractiveProcessingType2(
                ProductGenerationOptions=ProductGenerationOptionsType2(
                    DataRemapping=NewLookupTableType(
                        LUTName='DENSITY',
                        Predefined=PredefinedLookupType(
                            DatabaseName='DENSITY'))),
                RRDS=RRDSType2(DownsamplingMethod='DECIMATE'),
                band=i+1) for i in range(bands)],
            InteractiveProcessing=[InteractiveProcessingType2(
                GeometricTransform=GeometricTransformType2(
                    Scaling=ScalingType2(
                        AntiAlias=FilterType(
                            FilterName='AntiAlias',
                            FilterBank=FilterBankType(
                                Predefined=PredefinedFilterType(DatabaseName='BILINEAR')),
                            Operation='CONVOLUTION'),
                        Interpolation=FilterType(
                            FilterName='Interpolation',
                            FilterBank=FilterBankType(
                                Predefined=PredefinedFilterType(DatabaseName='BILINEAR')),
                            Operation='CONVOLUTION')),
                    Orientation=OrientationType2(ShadowDirection='ARBITRARY')),
                SharpnessEnhancement=SharpnessEnhancementType2(
                    ModularTransferFunctionEnhancement=FilterType(
                        FilterName='ModularTransferFunctionEnhancement',
                        FilterBank=FilterBankType(
                            Predefined=PredefinedFilterType(DatabaseName='BILINEAR')),
                        Operation='CONVOLUTION')),
                DynamicRangeAdjustment=DynamicRangeAdjustmentType2(
                    AlgorithmType='NONE',
                    BandStatsSource=1),
                band=i+1) for i in range(bands)])
    def _create_measurement_v2():
        # NOTE(review): returns None for a non-PGProjection helper, while
        # _create_exploitation_v2 raises for the same condition - confirm this
        # asymmetry is intentional.
        proj_helper = ortho_helper.proj_helper
        rows = bounds[1] - bounds[0]
        cols = bounds[3] - bounds[2]
        if isinstance(proj_helper, PGProjection):
            # fit the time coa polynomial in ortho-pixel coordinates
            plane_projection = _create_plane_projection(proj_helper, bounds)
            return MeasurementType2(PixelFootprint=(rows, cols),
                                    ValidData=((0, 0), (0, cols), (rows, cols), (rows, 0)),
                                    PlaneProjection=plane_projection,
                                    ARPPoly=XYZPolyType(
                                        X=proj_helper.sicd.Position.ARPPoly.X.get_array(),
                                        Y=proj_helper.sicd.Position.ARPPoly.Y.get_array(),
                                        Z=proj_helper.sicd.Position.ARPPoly.Z.get_array()))
        else:
            return None
    def _create_exploitation_v2():
        # exploitation features derived directly from the sicd structure
        proj_helper = ortho_helper.proj_helper
        if isinstance(proj_helper, PGProjection):
            return ExploitationFeaturesType2.from_sicd(
                proj_helper.sicd, proj_helper.row_vector, proj_helper.col_vector)
        else:
            raise ValueError(_proj_helper_text.format(type(proj_helper)))
    pixel_type = pixel_type.upper()
    # validate bounds and get pixel coordinates rectangle
    bounds, ortho_pixel_corners = ortho_helper.bounds_to_rectangle(bounds)
    # construct appropriate SIDD elements
    prod_create = ProductCreationType2.from_sicd(ortho_helper.proj_helper.sicd, product_class)
    prod_create.Classification.ISMCATCESVersion = '201903'
    prod_create.Classification.compliesWith = 'USGov'
    # Display requires more product specifics
    display = _create_display_v2()
    # GeoData - image corners in lat/lon from the ortho pixel rectangle
    llh_corners = ortho_helper.proj_helper.ortho_to_llh(ortho_pixel_corners)
    geo_data = GeoDataType2(ImageCorners=llh_corners[:, :2], ValidData=llh_corners[:, :2])
    # Measurement
    measurement = _create_measurement_v2()
    # ExploitationFeatures
    exploit_feats = _create_exploitation_v2()
    return SIDDType2(ProductCreation=prod_create,
                     GeoData=geo_data,
                     Display=display,
                     Measurement=measurement,
                     ExploitationFeatures=exploit_feats)
##########################
# Version 1 element creation
def create_sidd_structure_v1(ortho_helper, bounds, product_class, pixel_type):
    """
    Create a SIDD version 1.0 structure based on the orthorectification helper
    and pixel bounds.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
    bounds : numpy.ndarray|list|tuple
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.
    product_class : str
        A descriptive name for the product class. Examples -
        :code:`Dynamic Image, Amplitude Change Detection, Coherent Change Detection`
    pixel_type : str
        Must be one of `MONO8I, MONO16I` or `RGB24I`.

    Returns
    -------
    SIDDType1

    Raises
    ------
    ValueError
        If pixel_type is unrecognized, or the projection helper is not a
        PGProjection.
    """
    def _create_display_v1():
        # version 1 Display carries only the pixel type
        if pixel_type not in ('MONO8I', 'MONO16I', 'RGB24I'):
            raise ValueError(
                'pixel_type must be one of MONO8I, MONO16I, RGB24I. Got {}'.format(pixel_type))
        return ProductDisplayType1(PixelType=pixel_type)
    def _create_measurement_v1():
        proj_helper = ortho_helper.proj_helper
        if isinstance(proj_helper, PGProjection):
            # fit the time coa polynomial in ortho-pixel coordinates
            plane_projection = _create_plane_projection(proj_helper, bounds)
            return MeasurementType1(PixelFootprint=(bounds[1] - bounds[0], bounds[3] - bounds[2]),
                                    PlaneProjection=plane_projection,
                                    ARPPoly=XYZPolyType(
                                        X=proj_helper.sicd.Position.ARPPoly.X.get_array(),
                                        Y=proj_helper.sicd.Position.ARPPoly.Y.get_array(),
                                        Z=proj_helper.sicd.Position.ARPPoly.Z.get_array()))
        else:
            raise ValueError(_proj_helper_text.format(type(proj_helper)))
    def _create_exploitation_v1():
        # exploitation features derived directly from the sicd structure
        proj_helper = ortho_helper.proj_helper
        if isinstance(proj_helper, PGProjection):
            return ExploitationFeaturesType1.from_sicd(
                proj_helper.sicd, proj_helper.row_vector, proj_helper.col_vector)
        else:
            raise ValueError(_proj_helper_text.format(type(proj_helper)))
    pixel_type = pixel_type.upper()
    # validate bounds and get pixel coordinates rectangle
    bounds, ortho_pixel_corners = ortho_helper.bounds_to_rectangle(bounds)
    # construct appropriate SIDD elements
    prod_create = ProductCreationType1.from_sicd(ortho_helper.proj_helper.sicd, product_class)
    # Display requires more product specifics
    display = _create_display_v1()
    # GeographicAndTarget - footprint corners in lat/lon
    llh_corners = ortho_helper.proj_helper.ortho_to_llh(ortho_pixel_corners)
    geographic = GeographicAndTargetType1(
        GeographicCoverage=GeographicCoverageType1(Footprint=llh_corners[:, :2],
                                                   GeographicInfo=GeographicInformationType1()),)
    # Measurement
    measurement = _create_measurement_v1()
    # ExploitationFeatures
    exploit_feats = _create_exploitation_v1()
    return SIDDType1(ProductCreation=prod_create,
                     Display=display,
                     GeographicAndTarget=geographic,
                     Measurement=measurement,
                     ExploitationFeatures=exploit_feats)
##########################
# Switchable version SIDD structure

def create_sidd_structure(ortho_helper, bounds, product_class, pixel_type, version=3):
    """
    Create a SIDD structure of the given version, based on the
    orthorectification helper and pixel bounds.

    Parameters
    ----------
    ortho_helper : OrthorectificationHelper
    bounds : numpy.ndarray|list|tuple
        The orthorectification pixel bounds of the form `(min row, max row, min col, max col)`.
    product_class : str
        A descriptive name for the product class. Examples -
        :code:`Dynamic Image, Amplitude Change Detection, Coherent Change Detection`
    pixel_type : str
        Must be one of `MONO8I, MONO16I` or `RGB24I`.
    version : int
        The SIDD version, must be either 1, 2, or 3.

    Returns
    -------
    SIDDType1|SIDDType2|SIDDType3

    Raises
    ------
    ValueError
        For an unrecognized version.
    """

    # dispatch on version through a simple lookup table
    constructors = {
        1: create_sidd_structure_v1,
        2: create_sidd_structure_v2,
        3: create_sidd_structure_v3}
    if version not in constructors:
        raise ValueError('version must be 1, 2, or 3. Got {}'.format(version))
    return constructors[version](ortho_helper, bounds, product_class, pixel_type)
| 25,449 | 44.527728 | 119 | py |
sarpy | sarpy-master/sarpy/annotation/base.py | """
Base annotation types for general use - based on the geojson implementation
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
from uuid import uuid4
from typing import Optional, Dict, List, Any, Union
import json
from collections import OrderedDict
from sarpy.geometry.geometry_elements import Jsonable, FeatureCollection, Feature, \
GeometryCollection, GeometryObject, Geometry, basic_assemble_from_collection
_BASE_VERSION = "Base:1.0"
logger = logging.getLogger(__name__)
class GeometryProperties(Jsonable):
    """
    Identification/display properties (uid, name, color) associated with a
    single geometry element of an annotation.
    """

    __slots__ = ('_uid', '_name', '_color')
    _type = 'GeometryProperties'

    def __init__(self, uid=None, name=None, color=None):
        """
        Parameters
        ----------
        uid : None|str
            A unique identifier - one is generated if not provided.
        name : None|str
        color : None|str
        """

        self._name = None
        self._color = None
        the_uid = str(uuid4()) if uid is None else uid
        if not isinstance(the_uid, str):
            raise TypeError('uid must be a string')
        self._uid = the_uid
        self.name = name
        self.color = color

    @property
    def uid(self):
        """
        str: A unique identifier for the associated geometry element
        """

        return self._uid

    @property
    def name(self):
        """
        Optional[str]: The name
        """

        return self._name

    @name.setter
    def name(self, value):
        if not (value is None or isinstance(value, str)):
            raise TypeError('Got unexpected type for name')
        self._name = value

    @property
    def color(self):
        """
        Optional[str]: The color
        """

        return self._color

    @color.setter
    def color(self, value):
        if not (value is None or isinstance(value, str)):
            raise TypeError('Got unexpected type for color')
        self._color = value

    @classmethod
    def from_dict(cls, the_json):
        """
        Deserialize from json.

        Parameters
        ----------
        the_json : Dict

        Returns
        -------
        GeometryProperties
        """

        if the_json['type'] != cls._type:
            raise ValueError('GeometryProperties cannot be constructed from {}'.format(the_json))
        return cls(
            uid=the_json.get('uid', None),
            name=the_json.get('name', None),
            color=the_json.get('color', None))

    def to_dict(self, parent_dict=None):
        """
        Serialize to json.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """

        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        out['uid'] = self.uid
        # the optional attributes are serialized only when populated
        for attribute in ('name', 'color'):
            value = getattr(self, attribute)
            if value is not None:
                out[attribute] = value
        return out
class AnnotationProperties(Jsonable):
    """
    The basic common properties for an annotation - name, description,
    directory, the per-geometry properties list, and free-form parameters.
    """

    __slots__ = ('_name', '_description', '_directory', '_geometry_properties', '_parameters')
    _type = 'AnnotationProperties'

    def __init__(self, name=None, description=None, directory=None,
                 geometry_properties=None, parameters=None):
        """
        Parameters
        ----------
        name : Optional[str]
        description : Optional[str]
        directory : Optional[str]
            A '/'-delimited path for display/subdivision purposes.
        geometry_properties : None|List[GeometryProperties]
        parameters : Optional[Jsonable]
        """

        self._name = None
        self._description = None
        self._directory = None
        self._geometry_properties = []
        self._parameters = None
        # assignment through the property setters performs type validation
        self.name = name
        self.description = description
        self.directory = directory
        self.geometry_properties = geometry_properties
        self.parameters = parameters

    @property
    def name(self):
        """
        Optional[str]: The name
        """

        return self._name

    @name.setter
    def name(self, value):
        if value is None or isinstance(value, str):
            self._name = value
        else:
            raise TypeError('Got unexpected type for name')

    @property
    def description(self):
        """
        Optional[str]: The description
        """

        return self._description

    @description.setter
    def description(self, value):
        if value is None or isinstance(value, str):
            self._description = value
        else:
            raise TypeError('Got unexpected type for description')

    @property
    def directory(self):
        """
        Optional[str]: The directory - for basic display and/or subdivision purposes
        """

        return self._directory

    @directory.setter
    def directory(self, value):
        if value is None:
            self._directory = None
            return

        if not isinstance(value, str):
            raise TypeError('Got unexpected type for directory')
        # normalize: strip whitespace around each path part and drop empty parts
        parts = [entry.strip() for entry in value.split('/')]
        self._directory = '/'.join([entry for entry in parts if entry != ''])

    @property
    def geometry_properties(self):
        # type: () -> List[GeometryProperties]
        """
        List[GeometryProperties]: The geometry properties.
        """

        return self._geometry_properties

    @geometry_properties.setter
    def geometry_properties(self, value):
        # None is normalized to an empty list, so the attribute is always a list
        if value is None:
            self._geometry_properties = []
            return

        if not isinstance(value, list):
            raise TypeError('Got unexpected value for geometry properties')
        self._geometry_properties = []
        for entry in value:
            self.add_geometry_property(entry)

    def add_geometry_property(self, entry):
        """
        Add a geometry property to the list.

        .. warning::
            Care should be taken that this list stay in sync with the parent geometry.

        Parameters
        ----------
        entry: Dict|GeometryProperties
            The geometry properties instance of serialized version of it.
        """

        if isinstance(entry, dict):
            entry = GeometryProperties.from_dict(entry)

        if not isinstance(entry, GeometryProperties):
            raise TypeError('Got entry of unexpected type for geometry properties list')
        self._geometry_properties.append(entry)

    def get_geometry_property(self, item):
        """
        Fetches the appropriate geometry property.

        Parameters
        ----------
        item : int|str
            The geometry properties uid or integer index.

        Returns
        -------
        GeometryProperties

        Raises
        ------
        KeyError
        """

        return self.get_geometry_property_and_index(item)[0]

    def get_geometry_property_and_index(self, item):
        """
        Fetches the appropriate geometry property and its integer index.

        Parameters
        ----------
        item : int|str
            The geometry properties uid or integer index.

        Returns
        -------
        (GeometryProperties, int)

        Raises
        ------
        KeyError
        """

        if isinstance(item, int):
            return self._geometry_properties[item], item
        elif isinstance(item, str):
            # linear scan for the matching uid
            for index, entry in enumerate(self.geometry_properties):
                if entry.uid == item:
                    return entry, index
        raise KeyError('Got unrecognized geometry key `{}`'.format(item))

    @property
    def parameters(self):
        """
        Optional[Jsonable]: The parameters
        """

        return self._parameters

    @parameters.setter
    def parameters(self, value):
        if value is None or isinstance(value, Jsonable):
            self._parameters = value
        else:
            raise TypeError('Got unexpected type for parameters')

    @classmethod
    def from_dict(cls, the_json):
        """
        Deserialize from json.

        Parameters
        ----------
        the_json : Dict

        Returns
        -------
        AnnotationProperties
        """

        typ = the_json['type']
        if typ != cls._type:
            raise ValueError('AnnotationProperties cannot be constructed from {}'.format(the_json))
        return cls(
            name=the_json.get('name', None),
            description=the_json.get('description', None),
            directory=the_json.get('directory', None),
            geometry_properties=the_json.get('geometry_properties', None),
            parameters=the_json.get('parameters', None))

    def to_dict(self, parent_dict=None):
        """
        Serialize to json.

        Parameters
        ----------
        parent_dict : None|Dict

        Returns
        -------
        Dict
        """

        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        for field in ['name', 'description', 'directory']:
            value = getattr(self, field)
            if value is not None:
                parent_dict[field] = value
        # NOTE: geometry_properties is always a list (see setter), so this
        # condition is always satisfied and an empty list serializes as []
        if self.geometry_properties is not None:
            parent_dict['geometry_properties'] = [entry.to_dict() for entry in self.geometry_properties]
        if self.parameters is not None:
            parent_dict['parameters'] = self.parameters.to_dict()
        return parent_dict

    def replicate(self):
        # Create an independent copy of this instance - the element replicate()
        # calls presumably come from the Jsonable base (TODO confirm)
        geom_properties = None if self.geometry_properties is None else \
            [entry.replicate() for entry in self.geometry_properties]
        params = None if self.parameters is None else self.parameters.replicate()
        the_type = self.__class__
        return the_type(
            name=self.name, description=self.description, directory=self.directory,
            geometry_properties=geom_properties, parameters=params)
class AnnotationFeature(Feature):
    """
    An extension of the Feature class which has the properties attribute
    populated with AnnotationProperties instance.
    """

    # subclasses may restrict the permitted Geometry classes; None means unrestricted
    _allowed_geometries = None

    @property
    def properties(self):
        """
        The properties.

        Returns
        -------
        None|AnnotationProperties
        """

        return self._properties

    @properties.setter
    def properties(self, properties):
        # None is replaced with a fresh empty AnnotationProperties instance
        if properties is None:
            self._properties = AnnotationProperties()
        elif isinstance(properties, AnnotationProperties):
            self._properties = properties
        elif isinstance(properties, dict):
            self._properties = AnnotationProperties.from_dict(properties)
        else:
            raise TypeError('Got an unexpected type for properties attribute of class {}'.format(self.__class__))

    def get_name(self):
        """
        Gets a useful name - the properties name if populated, otherwise the uid.

        Returns
        -------
        str
        """

        if self.properties is None or self.properties.name is None:
            return self.uid
        return self.properties.name

    @property
    def geometry(self):
        """
        The geometry object.

        Returns
        -------
        GeometryObject|GeometryCollection
        """

        return self._geometry

    @geometry.setter
    def geometry(self, geometry):
        if isinstance(geometry, dict):
            geometry = Geometry.from_dict(geometry)

        if geometry is None:
            self._geometry = None
            return

        if not isinstance(geometry, Geometry):
            raise TypeError('geometry must be an instance of Geometry, got `{}`'.format(type(geometry)))
        # any collection is flattened/reassembled before validation
        if geometry.is_collection:
            geometry = basic_assemble_from_collection(geometry)
        self._geometry = self._validate_geometry_element(geometry)

    @property
    def geometry_count(self):
        """
        int: The number of base geometry elements
        """

        if self.geometry is None:
            return 0
        elif not self.geometry.is_collection:
            return 1
        else:
            return len(self.geometry.collection)

    def get_geometry_name(self, item):
        """
        Gets the name, or a reasonable default, for the geometry.

        Parameters
        ----------
        item : int|str

        Returns
        -------
        str
        """

        geometry, geom_properties = self.get_geometry_and_geometry_properties(item)
        # fall back to the geometry class name when no name is populated
        return '<{}>'.format(geometry.__class__.__name__) if geom_properties.name is None else geom_properties.name

    def get_geometry_property(self, item):
        """
        Gets the geometry properties object for the given index/uid.

        Parameters
        ----------
        item : int|str
            The geometry properties uid or integer index.

        Returns
        -------
        GeometryProperties

        Raises
        ------
        KeyError
        """

        return self.properties.get_geometry_property(item)

    def get_geometry_property_and_index(self, item):
        """
        Gets the geometry properties object and integer index for the given index/uid.

        Parameters
        ----------
        item : int|str
            The geometry properties uid or integer index.

        Returns
        -------
        (GeometryProperties, int)

        Raises
        ------
        KeyError
        """

        return self.properties.get_geometry_property_and_index(item)

    def get_geometry_and_geometry_properties(self, item):
        """
        Gets the geometry and geometry properties object for the given index/uid.

        Parameters
        ----------
        item : int|str
            The geometry properties uid or integer index.

        Returns
        -------
        (Point|Line|Polygon, GeometryProperties)

        Raises
        ------
        KeyError
        """

        if self.geometry is None:
            raise ValueError('No geometry defined.')

        # resolve the properties index, then use it into the geometry collection -
        # this relies on properties and geometry staying in sync
        geom_prop, index = self.get_geometry_property_and_index(item)
        index = int(index)
        if not (0 <= index < self.geometry_count):
            raise KeyError('invalid geometry index')
        if self.geometry.is_collection:
            return self.geometry.collection[index], geom_prop
        else:
            return self.geometry, geom_prop

    def get_geometry_element(self, item):
        """
        Gets the basic geometry object at the given index.

        Parameters
        ----------
        item : int|str
            The integer index or associated geometry properties uid.

        Returns
        -------
        Point|Line|Polygon

        Raises
        ------
        ValueError|KeyError
        """

        return self.get_geometry_and_geometry_properties(item)[0]

    def _validate_geometry_element(self, geometry):
        # Verify the geometry type against the class restriction, if any.
        if geometry is None:
            return geometry

        if not isinstance(geometry, Geometry):
            raise TypeError('geometry must be an instance of Geometry base class')
        if self._allowed_geometries is not None and geometry.__class__ not in self._allowed_geometries:
            raise TypeError('geometry ({}) is not of one of the allowed types ({})'.format(geometry, self._allowed_geometries))
        return geometry

    def add_geometry_element(self, geometry, properties=None):
        """
        Adds the given geometry to the feature geometry (collection), keeping
        the geometry properties list in step.

        Parameters
        ----------
        geometry : GeometryObject
        properties : None|GeometryProperties
        """

        if not isinstance(geometry, GeometryObject):
            raise TypeError('geometry must be a GeometryObject instance')
        if properties is None:
            properties = GeometryProperties()
        if not isinstance(properties, GeometryProperties):
            raise TypeError('properties must be a GeometryProperties instance')

        if self.properties is None:
            self.properties = AnnotationProperties()

        # handle the geometry
        self._geometry = self._validate_geometry_element(
            basic_assemble_from_collection(self.geometry, geometry))
        # add the geometry property
        self.properties.add_geometry_property(properties)
        # check that they are in sync
        if len(self.properties.geometry_properties) != self.geometry_count:
            logger.warning(
                'There are {} geometry elements defined\n\t'
                'and {} geometry properties populated. '
                'This is likely to cause problems.'.format(
                    self.geometry_count, len(self.properties.geometry_properties)))

    def remove_geometry_element(self, item):
        """
        Remove the geometry element at the given index

        Parameters
        ----------
        item : int|str
        """

        _, index = self.get_geometry_property_and_index(item)
        if self.geometry_count == 1:
            # removing the only element clears the geometry; note the properties
            # setter replaces None with a fresh empty AnnotationProperties
            self.geometry = None
            self.properties = None
        elif self.geometry_count == 2:
            # one element remains - collapse the collection to the bare geometry
            del self.geometry.collection[index]
            del self.properties.geometry_properties[index]
            self.geometry = self.geometry.collection[0]
        else:
            del self.geometry.collection[index]
            del self.properties.geometry_properties[index]
class AnnotationCollection(FeatureCollection):
    """
    An extension of the FeatureCollection class whose features are
    AnnotationFeature instances.
    """

    @property
    def features(self):
        """
        The features list.

        Returns
        -------
        List[AnnotationFeature]
        """

        return self._features

    @features.setter
    def features(self, features):
        if features is None:
            self._features = None
            self._feature_dict = None
            return

        if not isinstance(features, list):
            raise TypeError('features must be a list of AnnotationFeatures. Got {}'.format(type(features)))
        # NOTE(review): this appends onto any previously populated features
        # rather than replacing them - confirm this setter is only used at
        # construction time
        for element in features:
            self.add_feature(element)

    def add_feature(self, feature):
        """
        Add an annotation.

        Parameters
        ----------
        feature : AnnotationFeature|Dict
        """

        if isinstance(feature, dict):
            feature = AnnotationFeature.from_dict(feature)
        if not isinstance(feature, AnnotationFeature):
            raise TypeError('This requires an AnnotationFeature instance, got {}'.format(type(feature)))

        if self._features is None:
            self._features = []
            self._feature_dict = {}
        # the dict maps uid to position in the features list
        self._feature_dict[feature.uid] = len(self._features)
        self._features.append(feature)

    def __getitem__(self, item):
        # type: (Any) -> Union[AnnotationFeature, List[AnnotationFeature]]
        # NOTE(review): raising StopIteration from __getitem__ is unusual
        # (see PEP 479) - retained here to preserve existing behavior
        if self._features is None:
            raise StopIteration

        if isinstance(item, str):
            return self._features[self._feature_dict[item]]
        return self._features[item]
class FileAnnotationCollection(Jsonable):
    """
    An collection of annotation elements associated with a given single image element file.
    """

    __slots__ = (
        '_version', '_image_file_name', '_image_id', '_core_name', '_annotations')
    _type = 'FileAnnotationCollection'

    def __init__(self, version=None, annotations=None, image_file_name=None, image_id=None, core_name=None):
        """
        Parameters
        ----------
        version : None|str
            Defaults to the base version identifier.
        annotations : None|AnnotationCollection|Dict
        image_file_name : None|str
            Only the base file name (no directory) is retained.
        image_id : None|str
        core_name : None|str
        """

        if version is None:
            version = _BASE_VERSION
        self._version = version
        self._annotations = None

        if image_file_name is None:
            self._image_file_name = None
        elif isinstance(image_file_name, str):
            # retain only the file name portion of any provided path
            self._image_file_name = os.path.split(image_file_name)[1]
        else:
            raise TypeError('image_file_name must be a None or a string')

        self._image_id = image_id
        self._core_name = core_name

        # at least one identifier should tie this collection to an image -
        # log (but do not raise) if none is provided
        if self._image_file_name is None and self._image_id is None and self._core_name is None:
            logger.error('One of image_file_name, image_id, or core_name should be defined.')

        self.annotations = annotations

    @property
    def version(self):
        """
        str: The version
        """

        return self._version

    @property
    def image_file_name(self):
        """
        The image file name, if appropriate.

        Returns
        -------
        None|str
        """

        return self._image_file_name

    @property
    def image_id(self):
        """
        The image id, if appropriate.

        Returns
        -------
        None|str
        """

        return self._image_id

    @property
    def core_name(self):
        """
        The image core name, if appropriate.

        Returns
        -------
        None|str
        """

        return self._core_name

    @property
    def annotations(self):
        """
        The annotations.

        Returns
        -------
        AnnotationCollection
        """

        return self._annotations

    @annotations.setter
    def annotations(self, annotations):
        # type: (Union[None, AnnotationCollection, Dict]) -> None
        if annotations is None:
            self._annotations = None
            return

        if isinstance(annotations, AnnotationCollection):
            self._annotations = annotations
        elif isinstance(annotations, dict):
            self._annotations = AnnotationCollection.from_dict(annotations)
        else:
            raise TypeError(
                'annotations must be an AnnotationCollection. Got type {}'.format(type(annotations)))

    def add_annotation(self, annotation):
        """
        Add an annotation.

        Parameters
        ----------
        annotation : AnnotationFeature
            The prospective annotation.
        """

        if isinstance(annotation, dict):
            annotation = AnnotationFeature.from_dict(annotation)
        if not isinstance(annotation, AnnotationFeature):
            raise TypeError('This requires an AnnotationFeature instance. Got {}'.format(type(annotation)))

        # lazily create the collection on first addition
        if self._annotations is None:
            self._annotations = AnnotationCollection()

        self._annotations.add_feature(annotation)

    def delete_annotation(self, annotation_id):
        """
        Deletes the annotation associated with the given id.

        Parameters
        ----------
        annotation_id : str
        """

        # NOTE(review): relies on __delitem__ support from the annotations
        # collection (presumably inherited from FeatureCollection) - confirm
        del self._annotations[annotation_id]

    @classmethod
    def from_file(cls, file_name):
        """
        Read from (json) file.

        Parameters
        ----------
        file_name : str

        Returns
        -------
        FileAnnotationCollection
        """

        with open(file_name, 'r') as fi:
            the_dict = json.load(fi)
        return cls.from_dict(the_dict)

    @classmethod
    def from_dict(cls, the_dict):
        """
        Define from a dictionary representation.

        Parameters
        ----------
        the_dict : dict

        Returns
        -------
        FileAnnotationCollection
        """

        if not isinstance(the_dict, dict):
            raise TypeError('This requires a dict. Got type {}'.format(type(the_dict)))
        typ = the_dict.get('type', 'NONE')
        if typ != cls._type:
            raise ValueError('FileAnnotationCollection cannot be constructed from the input dictionary')

        return cls(
            version=the_dict.get('version', 'UNKNOWN'),
            annotations=the_dict.get('annotations', None),
            image_file_name=the_dict.get('image_file_name', None),
            image_id=the_dict.get('image_id', None),
            core_name=the_dict.get('core_name', None))

    def to_dict(self, parent_dict=None):
        # Serialize to a json-compatible dictionary, including only the
        # identifiers which are populated.
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        parent_dict['version'] = self.version
        if self.image_file_name is not None:
            parent_dict['image_file_name'] = self.image_file_name
        if self.image_id is not None:
            parent_dict['image_id'] = self.image_id
        if self.core_name is not None:
            parent_dict['core_name'] = self.core_name
        if self.annotations is not None:
            parent_dict['annotations'] = self.annotations.to_dict()
        return parent_dict

    def to_file(self, file_name):
        # Write the json serialization to the given file.
        with open(file_name, 'w') as fi:
            json.dump(self.to_dict(), fi, indent=1)
| 24,450 | 26.597065 | 127 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde.py | """
Simple helper functions for constructing the NGA modified AFRL/RDE structure
assuming either a known ground truth scenario or inferred analyst truth
scenario.
"""
__classification__ = 'UNCLASSIFIED'
__author__ = "Thomas McCullough"
from typing import List, Dict, Union, Optional
import numpy
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd import SICDReader
from sarpy.annotation.afrl_rde_elements.blocks import LabelSourceType
from sarpy.annotation.afrl_rde_elements.Research import ResearchType
from sarpy.annotation.afrl_rde_elements.CollectionInfo import CollectionInfoType
from sarpy.annotation.afrl_rde_elements.SubCollectionInfo import SubCollectionInfoType
from sarpy.annotation.afrl_rde_elements.ObjectInfo import ObjectInfoType, \
TheObjectType, GeoLocationType as ObjectGeoLocation, \
ImageLocationType as ObjectImageLocation, SizeType, OrientationType, \
StringWithComponentType
from sarpy.annotation.afrl_rde_elements.FiducialInfo import FiducialInfoType, \
TheFiducialType, GeoLocationType as FiducialGeoLocation, \
ImageLocationType as FiducialImageLocation
from sarpy.annotation.afrl_rde_elements.ImageInfo import ImageInfoType
from sarpy.annotation.afrl_rde_elements.SensorInfo import SensorInfoType
from sarpy.annotation.label import LabelSchema, FileLabelCollection, LabelCollection, \
LabelFeature, LabelProperties, LabelMetadata
class GroundTruthConstructor(object):
    """
    This class is a helper for performing a ground truth construction.

    Objects and fiducials are accumulated with their (ground truth) GeoLocation
    populated, then the image-location details are derived per sicd via the
    :meth:`localize_for_sicd` / :meth:`localize_for_sicd_reader` methods.
    """

    __slots__ = (
        '_collection_info', '_subcollection_info', '_label_source', '_objects', '_fiducials')

    def __init__(
            self, collection_info: CollectionInfoType,
            subcollection_info: SubCollectionInfoType,
            label_source: Optional[LabelSourceType] = None):
        """
        Parameters
        ----------
        collection_info : CollectionInfoType
        subcollection_info : SubCollectionInfoType
        label_source : None|LabelSourceType
            Defaults to an unspecified 'Ground Truth' source.
        """

        self._collection_info = collection_info
        self._subcollection_info = subcollection_info
        if label_source is None:
            self._label_source = LabelSourceType(SourceType='Ground Truth', SourceID='Unspecified')
        else:
            self._label_source = label_source
        self._objects = []
        self._fiducials = []

    def add_fiducial(self, the_fiducial: TheFiducialType) -> None:
        """
        Adds the given fiducial to the collection.

        The fiducial must not have ImageLocation, SlantPlane, or GroundPlane
        populated - those are derived later during localization.

        Parameters
        ----------
        the_fiducial : TheFiducialType

        Raises
        ------
        TypeError
            If not a TheFiducialType instance.
        ValueError
            If image-location information is already populated.
        """

        if not isinstance(the_fiducial, TheFiducialType):
            raise TypeError('Requires an object of type `TheFiducialType`, got `{}`'.format(type(the_fiducial)))
        if the_fiducial.ImageLocation is not None:
            raise ValueError('The fiducial has ImageLocation already set.')
        if the_fiducial.SlantPlane is not None or the_fiducial.GroundPlane is not None:
            raise ValueError('The fiducial already has the SlantPlane or GroundPlane set.')

        self._fiducials.append(the_fiducial)

    def add_fiducial_from_arguments(
            self,
            Name: str = None,
            SerialNumber: Optional[str] = None,
            FiducialType: Optional[str] = None,
            GeoLocation: FiducialGeoLocation = None) -> None:
        """
        Adds a fiducial to the collection, constructed from the given fields.

        Parameters
        ----------
        Name : str
        SerialNumber : None|str
        FiducialType : None|str
        GeoLocation : FiducialGeoLocation
        """

        self.add_fiducial(
            TheFiducialType(
                Name=Name,
                SerialNumber=SerialNumber,
                FiducialType=FiducialType,
                GeoLocation=GeoLocation))

    def add_object(
            self,
            the_object: TheObjectType) -> None:
        """
        Adds the given object to the collection.

        The object must not have ImageLocation, SlantPlane, or GroundPlane
        populated - those are derived later during localization.

        Parameters
        ----------
        the_object : TheObjectType

        Raises
        ------
        TypeError
            If not a TheObjectType instance.
        ValueError
            If image-location information is already populated.
        """

        if not isinstance(the_object, TheObjectType):
            raise TypeError('Requires an object of type `TheObjectType`, got `{}`'.format(type(the_object)))
        if the_object.ImageLocation is not None:
            raise ValueError('The object has ImageLocation already set.')
        if the_object.SlantPlane is not None or the_object.GroundPlane is not None:
            raise ValueError('The object already has the SlantPlane or GroundPlane set.')

        self._objects.append(the_object)

    def add_object_from_arguments(
            self,
            SystemName: str = None,
            SystemComponent: Optional[str] = None,
            NATOName: Optional[str] = None,
            Function: Optional[str] = None,
            Version: Optional[str] = None,
            DecoyType: Optional[str] = None,
            SerialNumber: Optional[str] = None,
            ObjectClass: str = 'Unknown',
            ObjectSubClass: str = 'Unknown',
            ObjectTypeClass: str = 'Unknown',
            ObjectType: str = 'Unknown',
            ObjectLabel: str = None,
            Size: Optional[Union[SizeType, numpy.ndarray, list, tuple]] = None,
            Orientation: OrientationType = None,
            Articulation: Union[None, str, StringWithComponentType, List[StringWithComponentType]] = None,
            Configuration: Union[None, str, StringWithComponentType, List[StringWithComponentType]] = None,
            Accessories: Optional[str] = None,
            PaintScheme: Optional[str] = None,
            Camouflage: Optional[str] = None,
            Obscuration: Optional[str] = None,
            ObscurationPercent: Optional[float] = None,
            ImageLevelObscuration: Optional[str] = None,
            GeoLocation: ObjectGeoLocation = None,
            TargetToClutterRatio: Optional[str] = None,
            VisualQualityMetric: Optional[str] = None,
            UnderlyingTerrain: Optional[str] = None,
            OverlyingTerrain: Optional[str] = None,
            TerrainTexture: Optional[str] = None,
            SeasonalCover: Optional[str] = None) -> None:
        """
        Adds an object to the collection, constructed from the given fields.

        Parameters
        ----------
        SystemName : str
        SystemComponent : None|str
        NATOName : None|str
        Function : None|str
        Version : None|str
        DecoyType : None|str
        SerialNumber : None|str
        ObjectClass : None|str
        ObjectSubClass : None|str
        ObjectTypeClass : None|str
        ObjectType : None|str
        ObjectLabel : None|str
        Size : None|SizeType|numpy.ndarray|list|tuple
        Orientation : OrientationType
        Articulation : None|str|StringWithComponentType|List[StringWithComponentType]
        Configuration : None|str|StringWithComponentType|List[StringWithComponentType]
        Accessories : None|str
        PaintScheme : None|str
        Camouflage : None|str
        Obscuration : None|str
        ObscurationPercent : None|float
        ImageLevelObscuration : None|str
        GeoLocation : ObjectGeoLocation
        TargetToClutterRatio : None|str
        VisualQualityMetric : None|str
        UnderlyingTerrain : None|str
        OverlyingTerrain : None|str
        TerrainTexture : None|str
        SeasonalCover : None|str
        """

        self.add_object(
            TheObjectType(SystemName=SystemName,
                          SystemComponent=SystemComponent,
                          NATOName=NATOName,
                          Function=Function,
                          Version=Version,
                          DecoyType=DecoyType,
                          SerialNumber=SerialNumber,
                          ObjectClass=ObjectClass,
                          ObjectSubClass=ObjectSubClass,
                          ObjectTypeClass=ObjectTypeClass,
                          ObjectType=ObjectType,
                          ObjectLabel=ObjectLabel,
                          Size=Size,
                          Orientation=Orientation,
                          Articulation=Articulation,
                          Configuration=Configuration,
                          Accessories=Accessories,
                          PaintScheme=PaintScheme,
                          Camouflage=Camouflage,
                          Obscuration=Obscuration,
                          ObscurationPercent=ObscurationPercent,
                          ImageLevelObscuration=ImageLevelObscuration,
                          GeoLocation=GeoLocation,
                          TargetToClutterRatio=TargetToClutterRatio,
                          VisualQualityMetric=VisualQualityMetric,
                          UnderlyingTerrain=UnderlyingTerrain,
                          OverlyingTerrain=OverlyingTerrain,
                          TerrainTexture=TerrainTexture,
                          SeasonalCover=SeasonalCover))

    def get_final_structure(self) -> ResearchType:
        """
        It is anticipated that this might be reused to localize for a whole series
        of different sicd files.

        Gets **a static copy** of the constructed AFRL Research structure. This has the
        provided CollectionInfo and SubCollectionInfo populated. It also
        has the ObjectInfo and FiducialInfo with the GeoLocation
        ground truth details that have been provided.

        No image location information has been populated, and there are no
        ImageInfo or SensorInfo populated, because these are independent
        of ground truth.

        Returns
        -------
        ResearchType
        """

        # the trailing copy() keeps the cached truth state unmodified by later use
        return ResearchType(
            DetailCollectionInfo=self._collection_info,
            DetailSubCollectionInfo=self._subcollection_info,
            DetailFiducialInfo=FiducialInfoType(
                NumberOfFiducialsInScene=len(self._fiducials),
                NumberOfFiducialsInImage=len(self._fiducials),
                LabelSource=self._label_source,
                Fiducials=self._fiducials),
            DetailObjectInfo=ObjectInfoType(
                NumberOfObjectsInScene=len(self._objects),
                NumberOfObjectsInImage=len(self._objects),
                LabelSource=self._label_source,
                Objects=self._objects)).copy()

    def localize_for_sicd(
            self,
            sicd: SICDType,
            base_sicd_file: str,
            layover_shift: bool = False,
            populate_in_periphery: bool = False,
            include_out_of_range: bool = False,
            padding_fraction: Optional[float] = 0.05,
            minimum_pad: Union[int, float] = 0,
            md5_checksum: Optional[str] = None):
        """
        Localize the AFRL structure for the given sicd structure.

        This returns **a static copy** of the AFRL structure, and this method
        can be repeatedly applied for a sequence of different sicd files which all
        apply to the same ground truth scenario.

        Parameters
        ----------
        sicd : SICDType
        base_sicd_file : str
        layover_shift : bool
        populate_in_periphery : bool
        include_out_of_range : bool
        padding_fraction : None|float
        minimum_pad : int|float
        md5_checksum : None|str

        Returns
        -------
        ResearchType
        """

        # start from a fresh copy of the truth structure, then derive the
        # image-location information from the given sicd
        out_research = self.get_final_structure()
        out_research.apply_sicd(
            sicd,
            base_sicd_file,
            layover_shift=layover_shift,
            populate_in_periphery=populate_in_periphery,
            include_out_of_range=include_out_of_range,
            padding_fraction=padding_fraction,
            minimum_pad=minimum_pad,
            md5_checksum=md5_checksum)
        return out_research

    def localize_for_sicd_reader(
            self,
            sicd_reader: SICDReader,
            layover_shift: bool = False,
            populate_in_periphery: bool = False,
            include_out_of_range: bool = False,
            padding_fraction: Optional[float] = 0.05,
            minimum_pad: Union[int, float] = 0,
            populate_md5: bool = True):
        """
        Localize the AFRL structure for the given sicd file.

        This returns **a static copy** of the AFRL structure, and this method
        can be repeatedly applied for a sequence of different sicd files which all
        apply to the same ground truth scenario.

        Parameters
        ----------
        sicd_reader : SICDReader
        layover_shift : bool
        populate_in_periphery : bool
        include_out_of_range : bool
        padding_fraction : None|float
        minimum_pad : int|float
        populate_md5 : bool

        Returns
        -------
        ResearchType
        """

        # start from a fresh copy of the truth structure, then derive the
        # image-location information from the reader's sicd
        out_research = self.get_final_structure()
        out_research.apply_sicd_reader(
            sicd_reader,
            layover_shift=layover_shift,
            populate_in_periphery=populate_in_periphery,
            include_out_of_range=include_out_of_range,
            padding_fraction=padding_fraction,
            minimum_pad=minimum_pad,
            populate_md5=populate_md5)
        return out_research
class AnalystTruthConstructor(object):
"""
This class is a helper for performing an analyst truth construction.
"""
__slots__ = (
'_sicd', '_base_file',
'_collection_info', '_subcollection_info', '_image_info', '_sensor_info',
'_label_source', '_objects', '_fiducials',
'_projection_type', '_proj_kwargs')
def __init__(
        self,
        sicd: SICDType,
        base_file: str,
        collection_info: CollectionInfoType,
        subcollection_info: SubCollectionInfoType,
        label_source: Optional[LabelSourceType] = None,
        projection_type: str = 'HAE',
        proj_kwargs: Optional[Dict] = None,
        md5_checksum: Optional[str] = None):
    """
    Parameters
    ----------
    sicd : SICDType
    base_file : str
    collection_info : CollectionInfoType
    subcollection_info : SubCollectionInfoType
    label_source : None|LabelSourceType
        Defaults to an unspecified 'Analyst Truth' source.
    projection_type : str
        One of 'PLANE', 'HAE', or 'DEM'. The value of `proj_kwargs`
        will need to be appropriate.
    proj_kwargs : None|Dict
        The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.
    md5_checksum : None|str
        The MD5 checksum of the full image file.
    """

    self._sicd = sicd
    self._base_file = base_file
    # TODO: should we create a decent shell for general Analyst Truth
    # collection and subcollection info?
    self._collection_info = collection_info
    self._subcollection_info = subcollection_info
    # image and sensor info are derived directly from the sicd structure
    self._image_info = ImageInfoType.from_sicd(self._sicd, self._base_file, md5_checksum=md5_checksum)
    self._sensor_info = SensorInfoType.from_sicd(self._sicd)
    if label_source is None:
        self._label_source = LabelSourceType(SourceType='Analyst Truth', SourceID='Unspecified')
    else:
        self._label_source = label_source
    self._objects = []
    self._fiducials = []
    # projection configuration used when deriving geo locations from image locations
    self._projection_type = projection_type
    self._proj_kwargs = {} if proj_kwargs is None else proj_kwargs
@property
def image_info(self) -> ImageInfoType:
    """
    ImageInfoType: The basic image info object derived from the sicd
    """

    return self._image_info
@property
def sensor_info(self) -> SensorInfoType:
    """
    SensorInfoType: The basic sensor info object derived from the sicd.
    """

    return self._sensor_info
def add_fiducial(self, the_fiducial: TheFiducialType) -> None:
"""
Adds the given fiducial to the collection. Note that this object will be modified in place.
Parameters
----------
the_fiducial : TheFiducialType
"""
if not isinstance(the_fiducial, TheFiducialType):
raise TypeError('Requires an object of type `TheFiducialType`, got `{}`'.format(type(the_fiducial)))
if the_fiducial.GeoLocation is not None:
raise ValueError('The fiducial has GeoLocation already set.')
the_fiducial.set_geo_location_from_sicd(self._sicd, projection_type=self._projection_type, **self._proj_kwargs)
self._fiducials.append(the_fiducial)
def add_fiducial_from_arguments(
self,
Name: Optional[str] = None,
SerialNumber: Optional[str] = None,
FiducialType: Optional[str] = None,
ImageLocation: FiducialImageLocation = None):
"""
Adds a fiducial to the collection.
Parameters
----------
Name : None|str
SerialNumber : None|str
FiducialType : None|str
ImageLocation : FiducialImageLocation
"""
self.add_fiducial(
TheFiducialType(
Name=Name,
SerialNumber=SerialNumber,
FiducialType=FiducialType,
ImageLocation=ImageLocation))
def add_object(
self,
the_object: TheObjectType,
padding_fraction: Optional[float] = 0.05,
minimum_pad: Union[int, float] = 0):
"""
Adds the object to the collection. Note that this object will be modified in place.
Parameters
----------
the_object : TheObjectType
padding_fraction : None|float
Default fraction of box dimension by which to pad.
minimum_pad : float|int
The minimum number of pixels by which to pad for the chip
"""
if not isinstance(the_object, TheObjectType):
raise TypeError('Requires an object of type `TheObjectType`, got `{}`'.format(type(the_object)))
if the_object.GeoLocation is not None:
raise ValueError('The object has GeoLocation already set.')
the_object.set_geo_location_from_sicd(
self._sicd, projection_type=self._projection_type, **self._proj_kwargs)
the_object.set_chip_details_from_sicd(
self._sicd, populate_in_periphery=True, padding_fraction=padding_fraction, minimum_pad=minimum_pad)
self._objects.append(the_object)
def add_object_from_arguments(
self,
padding_fraction: float = 0.05,
minimum_pad: Union[int, float] = 0,
SystemName: str = None,
SystemComponent: Optional[str] = None,
NATOName: Optional[str] = None,
Function: Optional[str] = None,
Version: Optional[str] = None,
DecoyType: Optional[str] = None,
SerialNumber: Optional[str] = None,
ObjectClass: str = 'Unknown',
ObjectSubClass: str = 'Unknown',
ObjectTypeClass: str = 'Unknown',
ObjectType: str = 'Unknown',
ObjectLabel: str = None,
Size: Union[None, SizeType, numpy.ndarray, list, tuple] = None,
Orientation: OrientationType = None,
Articulation: Union[None, str, StringWithComponentType, List[StringWithComponentType]] = None,
Configuration: Union[None, str, StringWithComponentType, List[StringWithComponentType]] = None,
Accessories: Optional[str] = None,
PaintScheme: Optional[str] = None,
Camouflage: Optional[str] = None,
Obscuration: Optional[str] = None,
ObscurationPercent: Optional[float] = None,
ImageLevelObscuration: Optional[str] = None,
ImageLocation: ObjectImageLocation = None,
TargetToClutterRatio: Optional[str] = None,
VisualQualityMetric: Optional[str] = None,
UnderlyingTerrain: Optional[str] = None,
OverlyingTerrain: Optional[str] = None,
TerrainTexture: Optional[str] = None,
SeasonalCover: Optional[str] = None) -> None:
"""
Adds an object to the collection.
Parameters
----------
padding_fraction : None|float
Default fraction of box dimension by which to pad.
minimum_pad : float|int
SystemName : str
SystemComponent : None|str
NATOName : None|str
Function : None|str
Version : None|str
DecoyType : None|str
SerialNumber : None|str
ObjectClass : None|str
ObjectSubClass : None|str
ObjectTypeClass : None|str
ObjectType : None|str
ObjectLabel : None|str
Size : None|SizeType|numpy.ndarray|list|tuple
Orientation : OrientationType
Articulation : None|str|StringWithComponentType|List[StringWithComponentType]
Configuration : None|str|StringWithComponentType|List[StringWithComponentType]
Accessories : None|str
PaintScheme : None|str
Camouflage : None|str
Obscuration : None|str
ObscurationPercent : None|float
ImageLevelObscuration : None|str
ImageLocation : ObjectImageLocation
TargetToClutterRatio : None|str
VisualQualityMetric : None|str
UnderlyingTerrain : None|str
OverlyingTerrain : None|str
TerrainTexture : None|str
SeasonalCover : None|str
"""
self.add_object(
TheObjectType(SystemName=SystemName,
SystemComponent=SystemComponent,
NATOName=NATOName,
Function=Function,
Version=Version,
DecoyType=DecoyType,
SerialNumber=SerialNumber,
ObjectClass=ObjectClass,
ObjectSubClass=ObjectSubClass,
ObjectTypeClass=ObjectTypeClass,
ObjectType=ObjectType,
ObjectLabel=ObjectLabel,
Size=Size,
Orientation=Orientation,
Articulation=Articulation,
Configuration=Configuration,
Accessories=Accessories,
PaintScheme=PaintScheme,
Camouflage=Camouflage,
Obscuration=Obscuration,
ObscurationPercent=ObscurationPercent,
ImageLevelObscuration=ImageLevelObscuration,
ImageLocation=ImageLocation,
TargetToClutterRatio=TargetToClutterRatio,
VisualQualityMetric=VisualQualityMetric,
UnderlyingTerrain=UnderlyingTerrain,
OverlyingTerrain=OverlyingTerrain,
TerrainTexture=TerrainTexture,
SeasonalCover=SeasonalCover),
padding_fraction=padding_fraction,
minimum_pad=minimum_pad)
def get_final_structure(self) -> ResearchType:
"""
This is not anticipated to be reused, so the raw progress to date is returned.
Care should be taken in modifying the returned structure directly.
Returns
-------
ResearchType
"""
return ResearchType(
DetailCollectionInfo=self._collection_info,
DetailSubCollectionInfo=self._subcollection_info,
DetailImageInfo=self._image_info,
DetailSensorInfo=self._sensor_info,
DetailFiducialInfo=FiducialInfoType(
NumberOfFiducialsInScene=len(self._fiducials),
NumberOfFiducialsInImage=len(self._fiducials),
LabelSource=self._label_source,
Fiducials=self._fiducials),
DetailObjectInfo=ObjectInfoType(
NumberOfObjectsInScene=len(self._objects),
NumberOfObjectsInImage=len(self._objects),
LabelSource=self._label_source,
Objects=self._objects))
def convert_afrl_to_native(
        research: ResearchType,
        include_chip: bool = False) -> FileLabelCollection:
    """
    Converts an AFRL structure to a label structure for simple viewing.

    Parameters
    ----------
    research : ResearchType
    include_chip : bool
        Include the chip definition in the geometry structure?

    Returns
    -------
    FileLabelCollection

    Raises
    ------
    TypeError
        If `research` is not a ResearchType instance.
    ValueError
        If there are no objects defined in `research`.
    """
    if not isinstance(research, ResearchType):
        raise TypeError('Expected ResearchType, got type `{}`'.format(type(research)))
    object_info = research.DetailObjectInfo
    if object_info is None or \
            object_info.Objects is None or \
            len(object_info.Objects) == 0:
        raise ValueError('Nothing to be done')

    def _make_feature(afrl_object: TheObjectType) -> LabelFeature:
        # extract the geometry and the per-geometry properties from the AFRL object
        geometry, geometry_properties = afrl_object.get_image_geometry_object_for_sicd(
            include_chip=include_chip)
        feature = LabelFeature(
            geometry=geometry,
            properties=LabelProperties(
                name=afrl_object.SystemName,
                geometry_properties=geometry_properties))
        feature.add_annotation_metadata(LabelMetadata(label_id=afrl_object.ObjectLabel))
        return feature

    # build an ad hoc label schema and populate the annotation collection,
    # adding a schema entry for each distinct object label encountered
    label_schema = LabelSchema(version='AdHoc')
    annotation_collection = LabelCollection()
    for afrl_object in object_info.Objects:
        label_id = afrl_object.ObjectLabel
        if label_id not in label_schema.labels:
            label_schema.add_entry(label_id, label_id)
        annotation_collection.add_feature(_make_feature(afrl_object))
    return FileLabelCollection(
        label_schema,
        annotations=annotation_collection,
        image_file_name=research.DetailImageInfo.DataFilename)
| 26,050 | 38.056972 | 119 | py |
sarpy | sarpy-master/sarpy/annotation/rcs.py | """
This module provides structures for annotating a given SICD type file for RCS
calculations
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from collections import OrderedDict, defaultdict
import json
from typing import Union, Any, List
import numpy
from sarpy.geometry.geometry_elements import Jsonable, Polygon, MultiPolygon
from sarpy.annotation.base import AnnotationFeature, AnnotationProperties, \
AnnotationCollection, FileAnnotationCollection
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.utils import get_im_physical_coords
_RCS_VERSION = "RCS:1.0"
logger = logging.getLogger(__name__)
DEFAULT_NAME_MAPPING = OrderedDict(
RCS='RCSSFPoly',
BetaZero='BetaZeroSFPoly',
GammaZero='GammaZeroSFPoly',
SigmaZero='SigmaZeroSFPoly')
def _get_polygon_bounds(polygon, data_size):
    """
    Gets the row/column pixel bounds for the polygon along with a polygon
    inclusion mask over that rectangular pixel sub-grid.

    Parameters
    ----------
    polygon : Polygon
    data_size : Tuple[int, int]
        The overall image size, as `(rows, columns)`.

    Returns
    -------
    row_bounds : Tuple[int, int]
        The lower and upper bounds for the rows.
    col_bounds : Tuple[int, int]
        The lower and upper bounds for the columns.
    mask: numpy.ndarray
        The boolean inclusion mask.
    """
    if not isinstance(polygon, Polygon):
        raise TypeError('polygon must be an instance of Polygon, got type {}'.format(type(polygon)))
    bounding_box = polygon.get_bbox()
    if len(bounding_box) != 4:
        raise ValueError('Got unexpected bounding box {}'.format(bounding_box))
    # clip the integer pixel bounding box to the image extent
    # (bbox layout is [min row, min col, max row, max col])
    row_lo = max(0, int(numpy.floor(bounding_box[0])))
    row_hi = min(int(numpy.floor(bounding_box[2])) + 1, data_size[0])
    col_lo = max(0, int(numpy.floor(bounding_box[1])))
    col_hi = min(int(numpy.floor(bounding_box[3])) + 1, data_size[1])
    # evaluate the containment mask on the clipped pixel grid
    mask = polygon.grid_contained(
        numpy.arange(row_lo, row_hi),
        numpy.arange(col_lo, col_hi))
    return (row_lo, row_hi), (col_lo, col_hi), mask
def create_rcs_value_collection_for_reader(reader, polygon):
    """
    Given a SICD type reader and a polygon with coordinates in pixel space
    (all sicd footprint assumed applicable), construct the `RCSValueCollection`.

    Parameters
    ----------
    reader : SICDTypeReader
    polygon : Polygon|MultiPolygon

    Returns
    -------
    RCSValueCollection
    """
    def evaluate_sicd(the_sicd):
        # type: (SICDType) -> (bool, bool)
        # returns the (has_radiometric, has_absolute_noise) flag pair for the sicd
        if the_sicd.Radiometric is None:
            return False, False
        if the_sicd.Radiometric.NoiseLevel is None:
            return True, False
        if the_sicd.Radiometric.NoiseLevel.NoiseLevelType == 'ABSOLUTE':
            return True, True
        else:
            return True, False
    def get_stat_entries():
        # factory for a per-units running-statistics accumulator; the noise
        # sub-entry is only present when absolute noise data is available
        def get_empty_dict():
            return dict(total=0.0, total2=0.0, count=0, min=numpy.inf, max=-numpy.inf)
        return defaultdict(
            lambda: dict(value=get_empty_dict(), noise=get_empty_dict()) if has_noise else dict(value=get_empty_dict()))
    def calculate_statistics(array, the_entry):
        # type: (numpy.ndarray, dict) -> None
        # fold the array into the running first/second moments and extrema
        the_entry['total'] += numpy.sum(array)
        the_entry['total2'] += numpy.sum(array*array)
        the_entry['count'] += array.size
        the_entry['min'] = min(the_entry['min'], numpy.min(array))
        the_entry['max'] = max(the_entry['max'], numpy.max(array))
    def get_total_rcs(the_stats, the_pol, the_ind, oversamp):
        # build the total RCS (or total pixel power, absent radiometric data) entry;
        # the RCS total is normalized by the oversample factor
        if has_radiometric:
            the_entry = the_stats['RCS']
            name = 'TotalRCS'
            val = the_entry['value']['total']/oversamp
            noise_val = the_entry['noise']['total']/oversamp if has_noise else None
        else:
            the_entry = the_stats['PixelPower']
            name = 'TotalPixelPower'
            val = the_entry['value']['total']
            noise_val = the_entry['noise']['total'] if has_noise else None
        out = RCSValue(the_pol, name, the_ind, value=RCSStatistics(mean=val))
        if has_noise:
            out.noise = RCSStatistics(mean=noise_val)
        return out
    def get_rcs_value(the_stats, the_pol, name, the_ind):
        # convert a raw accumulator into a populated RCSValue
        def make_stat_entry(vals):
            the_count = vals['count']
            if the_count == 0:
                # no contributing pixels - statistics are undefined
                the_mean = float('NaN')
                the_var = float('NaN')
            else:
                the_mean = float(vals['total']/the_count)
                the_var = vals['total2']/the_count - the_mean*the_mean
                if the_var < 0:
                    the_var = 0  # to avoid floating point errors
            return RCSStatistics(
                mean=the_mean, std=float(numpy.sqrt(the_var)), min=float(vals['min']), max=float(vals['max']))
        the_entry = the_stats[name]
        noise_value = the_entry.get('noise', None)
        out = RCSValue(the_pol, name, the_ind)
        out.value = make_stat_entry(the_entry['value'])
        if noise_value is not None:
            out.noise = make_stat_entry(noise_value)
        return out
    # verify that all footprints are identical
    data_sizes = reader.get_data_size_as_tuple()
    if len(data_sizes) > 1:
        for entry in data_sizes[1:]:
            if entry != data_sizes[0]:
                raise ValueError('Each image index must have identical size')
    data_size = data_sizes[0]
    # normalize the input to a list of simple polygons
    if isinstance(polygon, Polygon):
        polygons = [polygon, ]
    elif isinstance(polygon, MultiPolygon):
        polygons = polygon.polygons
    else:
        raise TypeError('polygon must be a Polygon or MultiPolygon, got type {}'.format(type(polygon)))
    # every sicd in the reader must agree on radiometric/noise availability
    sicds = reader.get_sicds_as_tuple()
    radiometric_signature = None
    for sicd in sicds:
        if radiometric_signature is None:
            radiometric_signature = evaluate_sicd(sicd)
        elif radiometric_signature != evaluate_sicd(sicd):
            raise ValueError('All sicds in the reader must have compatible Radiometric definition')
    has_radiometric, has_noise = radiometric_signature
    # construct the statistics values - first/second moments and max/min
    stat_values = [get_stat_entries() for _ in sicds]
    for polygon in polygons:
        row_bounds, col_bounds, mask = _get_polygon_bounds(polygon, data_size)
        if not numpy.any(mask):
            continue  # polygon contains no pixel centers
        for i, sicd in enumerate(sicds):
            current_stat_entries = stat_values[i]
            # define the pixel power array for the given polygon and image index
            data = reader[row_bounds[0]:row_bounds[1], col_bounds[0]:col_bounds[1], i][mask]
            data = data.real * data.real + data.imag * data.imag  # get pixel power
            # define the pixel power statistics
            calculate_statistics(data, current_stat_entries['PixelPower']['value'])
            if has_radiometric:
                noise_poly = sicd.Radiometric.NoiseLevel.NoisePoly if has_noise else None
                # construct the physical coordinate arrays
                row_array = numpy.arange(row_bounds[0], row_bounds[1], 1, dtype=numpy.int32)
                x_array = get_im_physical_coords(row_array, sicd.Grid, sicd.ImageData, 'Row')
                col_array = numpy.arange(col_bounds[0], col_bounds[1], 1, dtype=numpy.int32)
                y_array = get_im_physical_coords(col_array, sicd.Grid, sicd.ImageData, 'Col')
                yarr, xarr = numpy.meshgrid(y_array, x_array)
                xarr = xarr[mask]
                yarr = yarr[mask]
                # the noise polynomial is in dB - exp(ln(10)*0.1*x) = 10^(x/10) converts to power
                noise_power = numpy.exp(numpy.log(10)*0.1*noise_poly(xarr, yarr)) if has_noise else None
                if has_noise:
                    # add the noise statistics for the pixel power
                    calculate_statistics(noise_power, current_stat_entries['PixelPower']['noise'])
                # accumulate each radiometric scale-factor weighted quantity
                for units_name, rcs_poly_name in DEFAULT_NAME_MAPPING.items():
                    the_poly = getattr(sicd.Radiometric, rcs_poly_name)
                    sf_data = the_poly(xarr, yarr)
                    calculate_statistics(sf_data*data, current_stat_entries[units_name]['value'])
                    if has_noise:
                        calculate_statistics(sf_data*noise_power, current_stat_entries[units_name]['noise'])
    # convert this collection of raw data to the RCSStatistics collection
    rcs_values = RCSValueCollection()
    for i, sicd in enumerate(sicds):
        polarization = sicd.get_processed_polarization()
        oversample = sicd.Grid.Row.get_oversample_rate()*sicd.Grid.Col.get_oversample_rate()
        raw_stats = stat_values[i]
        # create the total rcs/power entry
        rcs_values.insert_new_element(get_total_rcs(raw_stats, polarization, i, oversample))
        rcs_values.insert_new_element(get_rcs_value(raw_stats, polarization, 'PixelPower', i))
        if has_radiometric:
            for the_units in DEFAULT_NAME_MAPPING.keys():
                rcs_values.insert_new_element(get_rcs_value(raw_stats, polarization, the_units, i))
    return rcs_values
class RCSStatistics(Jsonable):
    """
    Simple statistics (mean/std/max/min) for a single RCS quantity. All values
    are assumed to be stored in units of power.
    """
    __slots__ = ('mean', 'std', 'max', 'min')
    _type = 'RCSStatistics'

    def __init__(self, mean=None, std=None, max=None, min=None):
        """
        Parameters
        ----------
        mean : None|float
            In units of power.
        std : None|float
            In units of power.
        max : None|float
            In units of power.
        min : None|float
            In units of power.
        """
        # normalize each provided entry to a plain float, leaving None untouched
        self.mean = None if mean is None else float(mean)  # type: Union[None, float]
        self.std = None if std is None else float(std)  # type: Union[None, float]
        self.max = None if max is None else float(max)  # type: Union[None, float]
        self.min = None if min is None else float(min)  # type: Union[None, float]

    @classmethod
    def from_dict(cls, the_json):
        """
        Construct from the json compatible dictionary representation.
        """
        if the_json['type'] != cls._type:
            raise ValueError('RCSStatistics cannot be constructed from {}'.format(the_json))
        return cls(**{field: the_json.get(field, None) for field in cls.__slots__})

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json compatible dictionary, optionally into `parent_dict`.
        """
        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        for field in self.__slots__:
            out[field] = getattr(self, field)
        return out

    def get_field_list(self):
        """
        Gets the display-string tuple `(mean dB, mean, std, min, max)`, with
        empty strings for undefined entries.
        """
        if self.mean is None:
            return '', '', '', '', ''

        def render(value):
            return '' if value is None else '{0:0.5G}'.format(value)

        # no dB representation for non-positive mean power
        mean_db = '' if self.mean <= 0 else '{0:0.5G}'.format(10*numpy.log10(self.mean))
        return mean_db, render(self.mean), render(self.std), render(self.min), render(self.max)
class RCSValue(Jsonable):
    """
    A single RCS measurement entry - value and noise statistics for one
    polarization, one units convention, and one image index.
    """
    __slots__ = ('polarization', 'units', '_index', '_value', '_noise')
    _type = 'RCSValue'

    def __init__(self, polarization, units, index, value=None, noise=None):
        """
        Parameters
        ----------
        polarization : str
        units: str
        index : int
        value : None|RCSStatistics
        noise : None|RCSStatistics
        """
        self._value = None
        self._noise = None
        self._index = None
        self.polarization = polarization
        self.units = units
        self.index = index
        self.value = value
        self.noise = noise

    @property
    def value(self):
        """
        None|RCSStatistics: The value statistics.
        """
        return self._value

    @value.setter
    def value(self, the_value):
        if isinstance(the_value, dict):
            the_value = RCSStatistics.from_dict(the_value)
        if the_value is not None and not isinstance(the_value, RCSStatistics):
            raise TypeError('Got incompatible input for value')
        self._value = the_value

    @property
    def index(self):
        """
        int: The image index to which this applies
        """
        return self._index

    @index.setter
    def index(self, the_index):
        # a missing index defaults to the first image
        self._index = 0 if the_index is None else int(the_index)

    @property
    def noise(self):
        """
        None|RCSStatistics: The noise statistics.
        """
        return self._noise

    @noise.setter
    def noise(self, the_noise):
        if isinstance(the_noise, dict):
            the_noise = RCSStatistics.from_dict(the_noise)
        if the_noise is not None and not isinstance(the_noise, RCSStatistics):
            raise TypeError('Got incompatible input for noise')
        self._noise = the_noise

    @classmethod
    def from_dict(cls, the_json):  # type: (dict) -> RCSValue
        """
        Construct from the json compatible dictionary representation.
        """
        if the_json['type'] != cls._type:
            raise ValueError('RCSValue cannot be constructed from {}'.format(the_json))
        return cls(
            the_json.get('polarization', None),
            the_json.get('units', None),
            the_json.get('index', None),
            value=the_json.get('value', None),
            noise=the_json.get('noise', None))

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json compatible dictionary, optionally into `parent_dict`.
        """
        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        out['polarization'] = self.polarization
        out['units'] = self.units
        out['index'] = self.index
        # only serialize the statistics entries which are populated
        for field, entry in (('value', self.value), ('noise', self.noise)):
            if entry is not None:
                out[field] = entry.to_dict()
        return out
class RCSValueCollection(Jsonable):
    """
    A list of RCSValue entries together with the pixel count of the associated
    geometry - a specific type for the AnnotationProperties.parameters value.
    """
    __slots__ = ('_pixel_count', '_elements')
    _type = 'RCSValueCollection'

    def __init__(self, pixel_count=None, elements=None):
        """
        Parameters
        ----------
        pixel_count : None|int
        elements : None|List[RCSValue|dict]
        """
        self._pixel_count = None
        self._elements = []
        self.pixel_count = pixel_count
        self.elements = elements

    def __len__(self):
        return len(self._elements)

    def __getitem__(self, item):
        # type: (Union[int, slice]) -> Union[RCSValue, List[RCSValue]]
        return self._elements[item]

    @property
    def pixel_count(self):
        # type: () -> Union[None, int]
        """
        None|int: The number of integer pixel grid elements contained in the interior
        of the associated geometry element.
        """
        return self._pixel_count

    @pixel_count.setter
    def pixel_count(self, the_count):
        if the_count is None:
            self._pixel_count = None
        elif isinstance(the_count, int):
            self._pixel_count = the_count
        else:
            self._pixel_count = int(the_count)

    @property
    def elements(self):
        # type: () -> Union[None, List[RCSValue]]
        """
        List[RCSValue]: The RCSValue elements.
        """
        return self._elements

    @elements.setter
    def elements(self, the_elements):
        if the_elements is None:
            self._elements = []
            return
        if not isinstance(the_elements, list):
            raise TypeError('elements must be a list of RCSValue elements')
        self._elements = []
        for entry in the_elements:
            self.insert_new_element(entry)

    def insert_new_element(self, element):
        """
        Appends an element (coercing a dict representation) to the elements list.

        Parameters
        ----------
        element : RCSValue
        """
        if isinstance(element, dict):
            element = RCSValue.from_dict(element)
        if not isinstance(element, RCSValue):
            raise TypeError('element must be an RCSValue instance')
        self._elements.append(element)

    @classmethod
    def from_dict(cls, the_json):
        # type: (dict) -> RCSValueCollection
        """
        Construct from the json compatible dictionary representation.
        """
        if the_json['type'] != cls._type:
            raise ValueError('RCSValueCollection cannot be constructed from {}'.format(the_json))
        return cls(
            pixel_count=the_json.get('pixel_count', None),
            elements=the_json.get('elements', None))

    def to_dict(self, parent_dict=None):
        """
        Serialize to a json compatible dictionary, optionally into `parent_dict`.
        """
        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        out['pixel_count'] = self.pixel_count
        if len(self._elements) > 0:
            out['elements'] = [entry.to_dict() for entry in self._elements]
        return out
class RCSProperties(AnnotationProperties):
    """
    AnnotationProperties extension whose parameters entry is an RCSValueCollection.
    """
    _type = 'RCSProperties'

    @property
    def parameters(self):
        """
        RCSValueCollection: The RCS parameters collection.
        """
        return self._parameters

    @parameters.setter
    def parameters(self, the_params):
        if the_params is None:
            # default to an empty collection rather than None
            self._parameters = RCSValueCollection()
        elif isinstance(the_params, RCSValueCollection):
            self._parameters = the_params
        elif isinstance(the_params, dict):
            self._parameters = RCSValueCollection.from_dict(the_params)
        else:
            raise TypeError('Got unexpected type for parameters')
class RCSFeature(AnnotationFeature):
    """
    A specific extension of the Feature class whose properties attribute is
    populated with an RCSProperties (carrying an RCSValueCollection) instance.
    """
    _allowed_geometries = (Polygon, MultiPolygon)

    @property
    def properties(self):
        # type: () -> RCSProperties
        """
        RCSProperties: The properties.
        """
        return self._properties

    @properties.setter
    def properties(self, the_properties):
        if the_properties is None:
            self._properties = RCSProperties()
        elif isinstance(the_properties, dict):
            self._properties = RCSProperties.from_dict(the_properties)
        elif isinstance(the_properties, RCSProperties):
            self._properties = the_properties
        else:
            raise TypeError('properties must be an RCSProperties')

    def set_rcs_parameters_from_reader(self, reader):
        """
        Constructs the RCSValueCollection from the given SICD type reader over
        this feature's geometry, and sets it as the properties.parameters value.

        Parameters
        ----------
        reader : SICDTypeReader
        """
        if self.geometry is None or self.geometry_count == 0:
            # no geometry - reset to an empty parameters collection
            self.properties.parameters = None
        else:
            # noinspection PyTypeChecker
            self.properties.parameters = create_rcs_value_collection_for_reader(
                reader, self.geometry)
class RCSCollection(AnnotationCollection):
    """
    A specific extension of the AnnotationCollection class whose features are
    RCSFeature instances.
    """

    @property
    def features(self):
        """
        List[RCSFeature]: The features list.
        """
        return self._features

    @features.setter
    def features(self, the_features):
        if the_features is None:
            self._features = None
            self._feature_dict = None
            return
        if not isinstance(the_features, list):
            raise TypeError('features must be a list of RCSFeatures. Got {}'.format(type(the_features)))
        for feat in the_features:
            self.add_feature(feat)

    def add_feature(self, feature):
        """
        Add an annotation, coercing a dict representation as needed.

        Parameters
        ----------
        feature : RCSFeature|dict
        """
        if isinstance(feature, dict):
            feature = RCSFeature.from_dict(feature)
        if not isinstance(feature, RCSFeature):
            raise TypeError('This requires an RCSFeature instance, got {}'.format(type(feature)))
        if self._features is None:
            self._features = [feature, ]
            self._feature_dict = {feature.uid: 0}
        else:
            # maintain the uid -> list index lookup alongside the list
            self._feature_dict[feature.uid] = len(self._features)
            self._features.append(feature)

    def __getitem__(self, item):
        # type: (Any) -> Union[RCSFeature, List[RCSFeature]]
        if self._features is None:
            # NOTE: legacy behavior preserved - signals emptiness to direct iteration
            raise StopIteration
        if isinstance(item, str):
            # string lookup is by feature uid
            return self._features[self._feature_dict[item]]
        return self._features[item]
###########
# serialized file object
class FileRCSCollection(FileAnnotationCollection):
    """
    A collection of RCS statistics elements, serializable to/from a json file.
    """
    _type = 'FileRCSCollection'

    def __init__(self, version=None, annotations=None, image_file_name=None,
                 image_id=None, core_name=None):
        FileAnnotationCollection.__init__(
            self,
            version=_RCS_VERSION if version is None else version,
            annotations=annotations,
            image_file_name=image_file_name,
            image_id=image_id,
            core_name=core_name)

    @property
    def annotations(self):
        """
        RCSCollection: The annotations.
        """
        return self._annotations

    @annotations.setter
    def annotations(self, annotations):
        # type: (Union[None, RCSCollection, dict]) -> None
        if annotations is None:
            self._annotations = None
        elif isinstance(annotations, RCSCollection):
            self._annotations = annotations
        elif isinstance(annotations, dict):
            self._annotations = RCSCollection.from_dict(annotations)
        else:
            raise TypeError(
                'annotations must be an RCSCollection. Got type {}'.format(type(annotations)))

    def add_annotation(self, annotation):
        """
        Add an annotation, coercing a dict representation as needed.

        Parameters
        ----------
        annotation : RCSFeature
            The prospective annotation.
        """
        if isinstance(annotation, dict):
            annotation = RCSFeature.from_dict(annotation)
        if not isinstance(annotation, RCSFeature):
            raise TypeError('This requires an RCSFeature instance. Got {}'.format(type(annotation)))
        if self._annotations is None:
            self._annotations = RCSCollection()
        self._annotations.add_feature(annotation)

    def delete_annotation(self, annotation_id):
        """
        Deletes the annotation associated with the given id.

        Parameters
        ----------
        annotation_id : str
        """
        del self._annotations[annotation_id]

    @classmethod
    def from_file(cls, file_name):
        """
        Read from (json) file.

        Parameters
        ----------
        file_name : str

        Returns
        -------
        FileRCSCollection
        """
        with open(file_name, 'r') as fi:
            return cls.from_dict(json.load(fi))

    @classmethod
    def from_dict(cls, the_dict):
        """
        Define from a dictionary representation.

        Parameters
        ----------
        the_dict : dict

        Returns
        -------
        FileRCSCollection
        """
        if not isinstance(the_dict, dict):
            raise TypeError('This requires a dict. Got type {}'.format(type(the_dict)))
        if the_dict.get('type', 'NONE') != cls._type:
            raise ValueError('FileRCSCollection cannot be constructed from the input dictionary')
        return cls(
            version=the_dict.get('version', 'UNKNOWN'),
            annotations=the_dict.get('annotations', None),
            image_file_name=the_dict.get('image_file_name', None),
            image_id=the_dict.get('image_id', None),
            core_name=the_dict.get('core_name', None))
| 24,310 | 30.654948 | 120 | py |
sarpy | sarpy-master/sarpy/annotation/label.py | """
This module provides structures for performing data labelling on a background image
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import time
from collections import OrderedDict
import json
from typing import Union, List, Tuple, Any, Dict, Optional
from datetime import datetime
import getpass
from sarpy.annotation.base import AnnotationProperties, FileAnnotationCollection, \
AnnotationFeature, AnnotationCollection
from sarpy.geometry.geometry_elements import Jsonable, Geometry, Point, MultiPoint, \
LineString, MultiLineString, Polygon, MultiPolygon, GeometryCollection
_LABEL_VERSION = "Label:1.0"
logger = logging.getLogger(__name__)
POSSIBLE_GEOMETRIES = ('point', 'line', 'polygon')
class LabelSchema(object):
"""
The basic structure for an annotation/labelling schema.
The label names may certainly be modified in place through use of the `labels`
property, without worry for causing errors. This is discouraged, because having two
schemas with same version number/ids and differing names can likely lead to confusion.
Any modification of label ids of sub-id structure should be performed by using the
:func:`set_labels_and_subtypes` method, or difficult to diagnose runtime errors
will likely be introduced.
"""
__slots__ = (
'_version', '_labels', '_classification', '_version_date', '_subtypes',
'_parent_types', '_confidence_values', '_permitted_geometries',
'_integer_ids', '_maximum_id')
    def __init__(
            self,
            version: Optional[str] = '1.0',
            labels: Optional[Dict[str, str]] = None,
            version_date: Optional[str] = None,
            classification: str = "UNCLASSIFIED",
            subtypes: Optional[Dict[str, List[str]]] = None,
            confidence_values: Optional[List[Union[int, str]]] = None,
            permitted_geometries: Optional[List[str]] = None):
        """
        Parameters
        ----------
        version : None|str
            The version of the schema.
        labels : None|Dict[str, str]
            The {<label id> : <label name>} pair dictionary. Each entry must be a string,
            and '' is not a valid label id.
        version_date : None|str
            The date for this schema. If `None`, then the current time will be used.
        classification : str
            The classification for this schema.
        subtypes : None|Dict[str, List[str]]
            The {<label id> : <sub id list>} pairs. The root ids (i.e. those ids
            not belonging as sub-id to some other id) will be populated in the subtypes
            entry with empty string key (i.e. ''). Every key and entry of subtypes
            (excluding the subtypes root '') must correspond to an entry of labels,
            and no id can be a direct subtype of more than one id.
        confidence_values : None|List[Union[str, int]]
            The possible confidence values.
        permitted_geometries : None|List[str]
            The possible geometry types.
        """
        # initialize the slot variables before any property setters run
        self._version_date = None
        self._labels = None
        self._subtypes = None
        self._parent_types = None
        self._confidence_values = None
        self._permitted_geometries = None
        # id bookkeeping - maintained by set_labels_and_subtypes
        self._integer_ids = True
        self._maximum_id = None  # type: Union[None, int]
        self._version = version
        self.update_version_date(value=version_date)
        self._classification = classification
        # these assignments run the validating property setters
        self.confidence_values = confidence_values
        self.permitted_geometries = permitted_geometries
        # validates labels/subtypes consistency and builds the parent mapping
        self.set_labels_and_subtypes(labels, subtypes)
@property
def version(self) -> str:
"""
The version of the schema.
Returns
-------
str
"""
return self._version
@property
def version_date(self) -> str:
"""
The date for this schema version - this should be a viable datetime format,
but this is unenforced.
Returns
-------
str
"""
return self._version_date
def update_version_date(self, value: Optional[str] = None):
if isinstance(value, str):
self._version_date = value
else:
self._version_date = datetime.utcnow().isoformat('T')+'Z'
@property
def classification(self) -> str:
"""
str: The classification for the contents of this schema.
"""
return self._classification
@property
def suggested_next_id(self) -> Optional[int]:
"""
None|int: If all ids are integer type, this returns max_id+1. Otherwise, this
yields None.
"""
return None if self._maximum_id is None else self._maximum_id + 1
@property
def labels(self) -> Dict[str, str]:
"""
The complete label dictionary of the form `{label_id : label_name}`.
Returns
-------
Dict[str, str]
"""
return self._labels
@property
def subtypes(self) -> Dict[str, List[str]]:
"""
The complete dictionary of subtypes of the form `{parent_id : <subids list>}`.
Returns
-------
Dict[str, List[str]]
"""
return self._subtypes
@property
def parent_types(self) -> Dict[str, List[str]]:
"""
The dictionary of parent types of the form `{child_id : <set of parent ids>}`.
It is canonically defined that an id is a parent of itself. The order of
the `parent_ids` list is ascending order of parentage, i.e.
`[<self>, <parent>, <parent of parent>, ...]`.
Returns
-------
Dict[str, List[str]]
"""
return self._parent_types
@property
def confidence_values(self) -> List[Union[int, str]]:
"""
The list of confidence values.
Returns
-------
List
Each element should be a json type (most likely use cases are str or int).
"""
return self._confidence_values
@confidence_values.setter
def confidence_values(self, conf_values):
if conf_values is None:
self._confidence_values = None
return
if not isinstance(conf_values, list):
raise TypeError('confidence_values must be a list. Got type {}'.format(type(conf_values)))
self._confidence_values = conf_values
@property
def permitted_geometries(self) -> Optional[List[str]]:
"""
The collection of permitted geometry types. None corresponds to all.
Entries should be one of `{'point', 'line', 'polygon'}`.
Returns
-------
None|List[str]
"""
return self._permitted_geometries
@permitted_geometries.setter
def permitted_geometries(self, values):
if values is None:
self._permitted_geometries = None
return
if isinstance(values, str):
values = [values.lower().strip(), ]
else:
values = [entry.lower().strip() for entry in values]
if len(values) == 0:
self._permitted_geometries = None
return
temp_values = []
for entry in values:
if entry in temp_values:
continue
if entry not in POSSIBLE_GEOMETRIES:
raise ValueError('Got unknown geometry value `{}`'.format(entry))
temp_values.append(entry)
self._permitted_geometries = temp_values
def get_id_from_name(self, the_name: str) -> Optional[str]:
"""
Determine the id from the given name. Get `None` if this fails.
Parameters
----------
the_name : str
Returns
-------
None|str
"""
prospective = None
for key, value in self.labels.items():
if value == the_name:
prospective = key
break
return prospective
def get_parent(self, the_id: str) -> str:
"""
Get the parent id for the given element id. The empty string is returned
for elements with no parent.
Parameters
----------
the_id : str
Returns
-------
str
"""
parents = self.parent_types[the_id]
return parents[1] if len(parents) > 1 else ''
def __str__(self) -> str:
return json.dumps(self.to_dict(), indent=1)
def __repr__(self) -> str:
return json.dumps(self.to_dict())
def _inspect_new_id_for_integer(self, the_id: Union[int, str]) -> None:
if not self._integer_ids:
return # nothing to do
if isinstance(the_id, str):
# noinspection PyBroadException
try:
the_id = int(the_id)
except Exception:
self._integer_ids = False
self._maximum_id = None
if isinstance(the_id, int):
# noinspection PyTypeChecker
self._maximum_id = the_id if self._maximum_id is None else \
max(self._maximum_id, the_id)
else:
self._integer_ids = False
self._maximum_id = None
def _inspect_ids_for_integer(self) -> None:
for the_id in self._labels:
self._inspect_new_id_for_integer(the_id)
@staticmethod
def _find_inverted_fork(
subtypes: Dict[str, List[str]],
labels: Dict[str, str]) -> Dict[str, List[str]]:
"""
Look for parents claiming the same child. This assigns all unclaimed children
to '' parent.
Parameters
----------
subtypes : dict
labels : dict
Returns
-------
dict
"""
# we need to check the reference count for each key in labels
counts = OrderedDict((key, 0) for key in labels)
# ensure that every key of subtypes is a string and every value is a list,
# also that inclusion makes sense
for key, value in subtypes.items():
if not isinstance(key, str):
raise TypeError(
'All keys of subtypes must be of type string. Got key `{}` of '
'type {}.'.format(key, type(key)))
if key != '' and key not in labels:
raise KeyError(
'All keys of subtypes must belong to labels. Got key `{}` '
'which is missing from labels.'.format(key))
if not isinstance(value, list):
raise TypeError(
'All values of subtypes must be of type `list`. Got value {} '
'for key `{}` of type {}'.format(value, key, type(value)))
for entry in value:
if entry not in labels:
raise KeyError(
'All entries for each value of subtypes must belong to labels. '
'Got entry `{}` in key `{}` which is missing from labels.'.format(entry, key))
counts[entry] += 1
# create the root entry for subtypes
if '' not in subtypes:
subtypes[''] = []
if isinstance(subtypes, OrderedDict):
subtypes.move_to_end('', last=False)
for key in counts:
value = counts[key]
if value > 1:
raise ValueError('key {} is referenced in more than one subtype. This is invalid.'.format(key))
if value == 0:
subtypes[''].append(key)
return subtypes
@staticmethod
def _find_cycle(subtypes: Dict[str, List[str]]) -> None:
"""
Find any cycles in the data.
Parameters
----------
subtypes
Returns
-------
None
"""
found_cycles = []
def iterate(current_id, find_id):
for t_entry in subtypes.get(current_id, []):
if t_entry == find_id:
found_cycles.append((find_id, current_id))
iterate(t_entry, find_id)
for the_id in subtypes['']:
iterate(the_id, the_id)
if len(found_cycles) > 0:
for entry in found_cycles:
logger.error(
'Cycle found with ids {} and {}'.format(entry[0], entry[1]))
raise ValueError('cycles found in graph information')
    def set_labels_and_subtypes(
            self,
            labels: Dict[str, str],
            subtypes: Dict[str, List[str]]) -> None:
        """
        Set the labels and subtypes. **Note that subtypes may be modified in place.**
        Parameters
        ----------
        labels : None|dict
        subtypes : None|dict
        Returns
        -------
        None
        """
        # normalize None inputs to empty mappings
        if labels is None:
            labels = OrderedDict()
        if not isinstance(labels, dict):
            raise TypeError('labels is required to be a dict. Got type {}'.format(type(labels)))
        if subtypes is None:
            subtypes = OrderedDict()
        elif not isinstance(subtypes, dict):
            raise TypeError('subtypes is required to be None or a dict. Got type {}'.format(type(subtypes)))
        # ensure that every key and value of labels are strings
        for key in labels:
            if not isinstance(key, str):
                raise TypeError(
                    'All keys of labels must be of type string. Got key `{}` of '
                    'type {}'.format(key, type(key)))
            if key == '':
                raise ValueError('The empty string is not a valid label id.')
            value = labels[key]
            if not isinstance(value, str):
                raise TypeError(
                    'All values of labels must be of type string. Got value {} '
                    'for key `{}` of type {}'.format(value, key, type(value)))
        # look for inverted fork - multiple parents claiming the same child
        # (this also assigns any unclaimed ids to the '' root entry)
        subtypes = self._find_inverted_fork(subtypes, labels)
        # look for cycles
        self._find_cycle(subtypes)
        # set the values
        self._labels = labels
        self._subtypes = subtypes
        # rebuild the derived state (parent lookup and integer-id tracking)
        self._construct_parent_types()
        self._inspect_ids_for_integer()
def _construct_parent_types(self) -> None:
def iterate(t_key, parents):
entry = [t_key, ]
# noinspection PyUnresolvedReferences
entry.extend(parents)
self._parent_types[t_key] = entry
if t_key not in self._subtypes:
return
for child_key in self._subtypes[t_key]:
iterate(child_key, entry)
self._parent_types = {}
for key in self._subtypes['']:
iterate(key, [])
def _validate_entry(
self,
the_id: str,
the_name: str,
the_parent: str) -> Tuple[str, str, str]:
"""
Validate the basics for the given entry.
Parameters
----------
the_id : str
the_name : str
the_parent : str
Returns
-------
the_id: str
the_name: str
the_parent: str
"""
# validate inputs
if not (isinstance(the_id, str) and isinstance(the_name, str) and isinstance(the_parent, str)):
raise TypeError(
'the_id, the_name, and the_parent must all be string type, got '
'types {}, {}, {}'.format(type(the_id), type(the_name), type(the_parent)))
the_id = the_id.strip()
the_name = the_name.strip()
the_parent = the_parent.strip()
# verify that values are permitted and sensible
if the_id == '':
raise ValueError('the_id value `` is reserved.')
if the_name == '':
raise ValueError('the_name value `` is not permitted.')
if the_id == the_parent:
raise ValueError('the_id cannot be the same as the_parent.')
# try to determine parent from name if not a valid id
if the_parent != '' and the_parent not in self.labels:
prospective_parent = self.get_id_from_name(the_parent)
if prospective_parent is None:
raise ValueError('the_parent {} matches neither an existing id or name.'.format(the_parent))
the_parent = prospective_parent
return the_id, the_name, the_parent
    def add_entry(
            self,
            the_id: str,
            the_name: str,
            the_parent: str = '') -> None:
        """
        Adds a new entry. Note that leading or trailing blanks will be trimmed
        from all input values.
        Parameters
        ----------
        the_id : str
            The id for the label.
        the_name : str
            The name for the label.
        the_parent : str
            The parent id, where blank denotes no parent.
        Returns
        -------
        None
        """
        # validate inputs
        the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)
        # verify that the_id doesn't already exist
        if the_id in self.labels:
            raise KeyError('the_id = {} already exists'.format(the_id))
        # check if name is already being used, and warn if so
        for key, value in self.labels.items():
            if value == the_name:
                logger.warning(
                    'Note that id {} is already using name {}. Having repeated names is '
                    'permitted, but may lead to confusion.'.format(key, value))
        # add the entry into the labels and subtypes dicts and reset the values
        # perform copy in case of failure
        labels = self.labels.copy()
        subtypes = self.subtypes.copy()
        labels[the_id] = the_name
        if the_parent in subtypes:
            subtypes[the_parent].append(the_id)
        else:
            subtypes[the_parent] = [the_id, ]
        try:
            self.set_labels_and_subtypes(labels, subtypes)
        except (ValueError, KeyError) as e:
            # NOTE(review): the failure is logged but swallowed here, while
            # change_entry re-raises in the analogous case - confirm intended
            logger.error(
                'Setting new entry id {}, name {}, and parent {} failed with '
                'exception {}'.format(the_id, the_name, the_parent, e))
    def change_entry(
            self,
            the_id: str,
            the_name: str,
            the_parent: str) -> bool:
        """
        Modify the values for a schema element.
        Parameters
        ----------
        the_id : str
        the_name : str
        the_parent : str
        Returns
        -------
        bool
            True if anything was actually changed. False otherwise.
        """
        # validate inputs
        the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)
        # verify that the_id does already exist
        if the_id not in self.labels:
            raise KeyError('the_id = {} does not exist'.format(the_id))
        # check current values
        current_name = self.labels[the_id]
        current_parents = self.parent_types[the_id]
        current_parent = current_parents[1] if len(current_parents) > 1 else ''
        if current_name == the_name and current_parent == the_parent:
            # nothing is changing
            return False
        # check if name is already being used by a different element, and warn if so
        if current_name != the_name:
            for key, value in self.labels.items():
                if value == the_name and key != the_id:
                    logger.warning(
                        'Note that id {} is already using name {}. Having repeated names is '
                        'permitted, but may lead to confusion.'.format(key, value))
        if current_parent != the_parent:
            # the graph structure is changing - work on copies so a validation
            # failure in set_labels_and_subtypes leaves the schema untouched
            labels = self.labels.copy()
            labels[the_id] = the_name
            subtypes = self.subtypes.copy()
            # remove the_id from it's current subtype
            subtypes[current_parent].remove(the_id)
            # add it to the new one
            if the_parent in subtypes:
                subtypes[the_parent].append(the_id)
            else:
                subtypes[the_parent] = [the_id, ]
            try:
                self.set_labels_and_subtypes(labels, subtypes)
            except (ValueError, KeyError) as e:
                logger.error(
                    'Modifying entry id {}, name {}, and parent {} failed with '
                    'exception {}.'.format(the_id, the_name, the_parent, e))
                raise e
        else:
            # just changing the name - safe to mutate in place
            self.labels[the_id] = the_name
        return True
def delete_entry(
self,
the_id: str,
recursive: bool = False) -> None:
"""
Deletes the entry from the schema.
If the given element has children and `recursive=False`, a ValueError
will be raised. If the given element has children and `recursive=True`,
then all children will be deleted.
Parameters
----------
the_id : str
recursive : bool
"""
if the_id in self._subtypes:
# handle all the children
children = self.subtypes[the_id]
if children is not None and len(children) > 0:
if not recursive:
raise ValueError(
'LabelSchema entry for id {} has children. Either move children to a '
'different parent, or make recursive=True to delete all children.'.format(the_id))
the_children = children.copy() # unsafe to loop over a changing list
for entry in the_children:
self.delete_entry(entry, recursive=True)
# now, all the children have been deleted.
del self._subtypes[the_id]
# remove the entry from the parent's subtypes list
parent_id = self.get_parent(the_id)
self.subtypes[parent_id].remove(parent_id)
# remove entry from labels
del self._labels[the_id]
del self._parent_types[the_id]
def reorder_child_element(
self,
the_id: str,
spaces: int = 1) -> bool:
"""
Move the one space (forward or backward) in the list of children for the
current parent. This is explicitly changes no actual parent/child
relationships, and only changes the child list ORDERING.
Parameters
----------
the_id : str
spaces : int
How many spaces to shift the entry.
Returns
-------
bool
True of something actually changed, False otherwise.
"""
if the_id not in self._labels:
raise KeyError('No id {}'.format(the_id))
parent_id = self.get_parent(the_id)
children = self.subtypes[parent_id]
# get the current location
current_index = children.index(the_id)
# determine the feasible new location
if spaces < 0:
new_index = max(0, current_index + spaces)
else:
new_index = min(len(children) - 1, current_index + spaces)
if current_index == new_index:
return False # nothing to be done
# pop our entry out of its current location
children.pop(current_index)
# insert it in its new location
children.insert(new_index, the_id)
return True
@classmethod
def from_file(cls, file_name: str):
"""
Read schema from a file.
Parameters
----------
file_name : str
Returns
-------
LabelSchema
"""
with open(file_name, 'r') as fi:
input_dict = json.load(fi)
return cls.from_dict(input_dict)
@classmethod
def from_dict(cls, input_dict: Dict):
"""
Construct from a dictionary.
Parameters
----------
input_dict : dict
Returns
-------
LabelSchema
"""
version = input_dict['version']
labels = input_dict['labels']
version_date = input_dict.get('version_date', None)
classification = input_dict.get('classification', 'UNCLASSIFIED')
subtypes = input_dict.get('subtypes', None)
conf_values = input_dict.get('confidence_values', None)
perm_geometries = input_dict.get('permitted_geometries', None)
return cls(
version, labels, version_date=version_date, classification=classification,
subtypes=subtypes, confidence_values=conf_values, permitted_geometries=perm_geometries)
def to_dict(self) -> Dict:
"""
Serialize to a dictionary representation.
Returns
-------
dict
"""
out = OrderedDict()
out['version'] = self.version
out['version_date'] = self.version_date
out['classification'] = self.classification
if self.confidence_values is not None:
out['confidence_values'] = self.confidence_values
if self.permitted_geometries is not None:
out['permitted_geometries'] = self.permitted_geometries
out['labels'] = self._labels
out['subtypes'] = self._subtypes
return out
def to_file(self, file_name: str) -> None:
"""
Write to a (json) file.
Parameters
----------
file_name : str
Returns
-------
None
"""
with open(file_name, 'w') as fi:
json.dump(self.to_dict(), fi, indent=1)
def is_valid_confidence(self, value: List) -> bool:
"""
Is the given value a valid confidence (i.e. is in `confidence_values`)?
Note that `None` is always considered valid here.
Parameters
----------
value
Returns
-------
bool
"""
if self._confidence_values is None or value is None:
return True
else:
return value in self._confidence_values
def is_valid_geometry(self, value: List) -> bool:
"""
Is the given geometry type allowed (i.e. is in `permitted_geometries`)?
Note that `None` is always considered valid here.
Parameters
----------
value : str|Geometry
Returns
-------
bool
"""
def check_geom(geom):
if isinstance(geom, (Point, MultiPoint)):
out = 'point' in self._permitted_geometries
if not out:
logger.error('Not allowed point type geometry components')
return out
elif isinstance(geom, (LineString, MultiLineString)):
out = 'line' in self._permitted_geometries
if not out:
logger.error('Not allowed line type geometry components')
return out
elif isinstance(geom, (Polygon, MultiPolygon)):
out = 'polygon' in self._permitted_geometries
if not out:
logger.error('Not allowed polygon type geometry components')
return out
elif isinstance(geom, GeometryCollection):
out = True
for entry in geom.geometries:
out &= check_geom(entry)
return out
else:
raise TypeError('Got unexpected geometry type `{}`'.format(type(geom)))
if self._permitted_geometries is None or value is None:
return True
if isinstance(value, str):
return value.lower().strip() in self._permitted_geometries
if not isinstance(value, Geometry):
raise TypeError('Got unexpected geometry type `{}`'.format(type(value)))
return check_geom(value)
##########
# elements for labeling a feature
class LabelMetadata(Jsonable):
    """
    Basic annotation metadata building block - everything but the geometry object
    """

    __slots__ = ('label_id', 'user_id', 'comment', 'confidence', 'timestamp')
    _type = 'LabelMetadata'

    def __init__(
            self,
            label_id: Optional[str] = None,
            user_id: Optional[str] = None,
            comment: Optional[str] = None,
            confidence: Union[None, int, str] = None,
            timestamp: Union[None, int, float] = None):
        """
        Parameters
        ----------
        label_id : None|str
            The label id
        user_id : None|str
            The user id - will default to current user name
        comment : None|str
        confidence : None|str|int
            The confidence value
        timestamp : None|float|int
            The POSIX timestamp (in seconds) - should be construction time.
        """

        self.label_id = label_id  # type: Union[None, str]
        # default the user to whoever is currently logged in
        self.user_id = getpass.getuser() if user_id is None else user_id  # type: str
        self.comment = comment  # type: Union[None, str]
        self.confidence = confidence  # type: Union[None, str, int]
        # default the timestamp to "now", coercing to float in any case
        self.timestamp = time.time() if timestamp is None else float(timestamp)  # type: float

    @classmethod
    def from_dict(cls, the_json: Dict):
        if the_json['type'] != cls._type:
            raise ValueError('LabelMetadata cannot be constructed from {}'.format(the_json))
        kwargs = {
            field: the_json.get(field, None)
            for field in ('label_id', 'user_id', 'comment', 'confidence', 'timestamp')}
        return cls(**kwargs)

    def to_dict(self, parent_dict: Optional[Dict] = None):
        out = OrderedDict() if parent_dict is None else parent_dict
        out['type'] = self.type
        for field in self.__slots__:
            out[field] = getattr(self, field)
        return out

    def replicate(self):
        # copy everything except the user and timestamp, which are
        # re-defaulted at construction time
        kwargs = {
            field: getattr(self, field)
            for field in self.__slots__ if field not in ('user_id', 'timestamp')}
        return self.__class__(**kwargs)
class LabelMetadataList(Jsonable):
    """
    The collection of LabelMetadata elements, maintained in order of
    decreasing timestamp (newest first).
    """

    __slots__ = ('_elements', )
    _type = 'LabelMetadataList'

    def __init__(self, elements: Union[None, List[LabelMetadata], Dict] = None):
        """
        Parameters
        ----------
        elements : None|List[LabelMetadata|dict]
        """

        self._elements = None
        if elements is not None:
            self.elements = elements

    def __len__(self):
        if self._elements is None:
            return 0
        return len(self._elements)

    def __getitem__(self, item):
        # type: (Any) -> LabelMetadata
        if self._elements is None:
            raise StopIteration
        return self._elements[item]

    @property
    def elements(self) -> Optional[List[LabelMetadata]]:
        """
        The LabelMetadata elements.

        Returns
        -------
        None|List[LabelMetadata]
        """

        return self._elements

    @elements.setter
    def elements(self, elements):
        if elements is None:
            self._elements = None
            # fixed: previously fell through to the isinstance check below and
            # raised TypeError for None input
            return
        if not isinstance(elements, list):
            raise TypeError('elements must be a list of LabelMetadata elements')
        self._elements = []
        for element in elements:
            self.insert_new_element(element)

    def insert_new_element(self, element: LabelMetadata) -> None:
        """
        Inserts an element into the list, maintaining newest-first ordering.

        Parameters
        ----------
        element : LabelMetadata

        Returns
        -------
        None
        """

        if isinstance(element, dict):
            element = LabelMetadata.from_dict(element)
        if not isinstance(element, LabelMetadata):
            raise TypeError('element must be an LabelMetadata instance, got type {}'.format(type(element)))
        if self._elements is None:
            self._elements = [element, ]
        elif len(self._elements) == 0:
            self._elements.append(element)
        else:
            for i, entry in enumerate(self._elements):
                if element.timestamp > entry.timestamp:
                    self._elements.insert(i, element)
                    break
            else:
                # fixed: an element older than (or tying) every existing entry
                # was previously dropped silently - append it at the end
                self._elements.append(element)

    @classmethod
    def from_dict(cls, the_json):
        # type: (dict) -> LabelMetadataList
        typ = the_json['type']
        if typ != cls._type:
            raise ValueError('LabelMetadataList cannot be constructed from {}'.format(the_json))
        return cls(elements=the_json.get('elements', None))

    def to_dict(self, parent_dict: Optional[Dict] = None):
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        if self._elements is None:
            parent_dict['elements'] = None
        else:
            parent_dict['elements'] = [entry.to_dict() for entry in self._elements]
        return parent_dict

    def replicate(self):  # type: () -> LabelMetadataList
        kwargs = {}
        elements = self.elements
        if elements is not None:
            # only the most recent (first) element carries forward
            kwargs['elements'] = [elements[0].replicate()]
        the_type = self.__class__
        return the_type(**kwargs)

    def get_label_id(self) -> Optional[str]:
        """
        Gets the current label id, from the most recent element.

        Returns
        -------
        None|str
        """

        return None if (self.elements is None or len(self.elements) == 0) else self.elements[0].label_id
class LabelProperties(AnnotationProperties):
    """
    The label annotation properties, where the parameters attribute holds a
    LabelMetadataList instance.
    """

    _type = 'LabelProperties'

    @property
    def parameters(self):
        """
        LabelMetadataList: The parameters
        """

        return self._parameters

    @parameters.setter
    def parameters(self, value):
        if value is None:
            value = LabelMetadataList()
        elif isinstance(value, dict):
            value = LabelMetadataList.from_dict(value)
        if not isinstance(value, LabelMetadataList):
            raise TypeError('Got unexpected type for parameters `{}`'.format(type(value)))
        self._parameters = value

    def get_label_id(self):
        """
        Gets the current label id.

        Returns
        -------
        None|str
        """

        if self.parameters is None:
            return None
        return self.parameters.get_label_id()
############
# the feature extensions
class LabelFeature(AnnotationFeature):
    """
    A specific extension of the Feature class which has the properties attribute
    populated with LabelProperties instance.
    """

    @property
    def properties(self):
        """
        None|LabelProperties: The properties.
        """

        return self._properties

    @properties.setter
    def properties(self, properties):
        if properties is None:
            self._properties = LabelProperties()
        elif isinstance(properties, dict):
            self._properties = LabelProperties.from_dict(properties)
        elif isinstance(properties, LabelProperties):
            self._properties = properties
        else:
            raise TypeError('properties must be an LabelProperties')

    def add_annotation_metadata(self, value):
        """
        Adds the new label to the series of labeling efforts.

        Parameters
        ----------
        value : LabelMetadata
        """

        if self._properties is None:
            self._properties = LabelProperties()
        self._properties.parameters.insert_new_element(value)

    def get_label_id(self):
        """
        Gets the label id.

        Returns
        -------
        None|str
        """

        return None if self.properties is None else self.properties.get_label_id()
class LabelCollection(AnnotationCollection):
    """
    A specific extension of the FeatureCollection class whose features are
    LabelFeature instances.
    """

    @property
    def features(self):
        """
        List[LabelFeature]: The features list.
        """

        return self._features

    @features.setter
    def features(self, features):
        if features is None:
            self._features = None
            self._feature_dict = None
            return
        if not isinstance(features, list):
            raise TypeError(
                'features must be a list of LabelFeatures. '
                'Got {}'.format(type(features)))
        for entry in features:
            self.add_feature(entry)

    def add_feature(self, feature):
        """
        Add an annotation.

        Parameters
        ----------
        feature : LabelFeature
        """

        if isinstance(feature, dict):
            feature = LabelFeature.from_dict(feature)
        if not isinstance(feature, LabelFeature):
            raise TypeError('This requires an LabelFeature instance, got {}'.format(type(feature)))
        if self._features is None:
            # first feature initializes the storage and the uid lookup
            self._feature_dict = {feature.uid: 0}
            self._features = [feature, ]
        else:
            self._feature_dict[feature.uid] = len(self._features)
            self._features.append(feature)

    def __getitem__(self, item):
        # type: (Any) -> Union[LabelFeature, List[LabelFeature]]
        if self._features is None:
            raise StopIteration
        if isinstance(item, str):
            # string access goes through the uid lookup
            return self._features[self._feature_dict[item]]
        return self._features[item]
###########
# serialized file object
class FileLabelCollection(FileAnnotationCollection):
    """
    An collection of annotation elements associated with a given single image element file.
    """

    __slots__ = (
        '_version', '_label_schema', '_image_file_name', '_image_id', '_core_name', '_annotations')
    _type = 'FileLabelCollection'

    def __init__(self, label_schema, version=None, annotations=None,
                 image_file_name=None, image_id=None, core_name=None):
        """
        Parameters
        ----------
        label_schema : str|dict|LabelSchema
            A path to a schema file, a dict representation, or the schema itself.
        version : None|str
            Defaults to the module-level label version when not given.
        annotations : None|LabelCollection|dict
        image_file_name : None|str
        image_id : None|str
        core_name : None|str
        """
        if version is None:
            version = _LABEL_VERSION
        # coerce the schema input to a LabelSchema instance
        if isinstance(label_schema, str):
            label_schema = LabelSchema.from_file(label_schema)
        elif isinstance(label_schema, dict):
            label_schema = LabelSchema.from_dict(label_schema)
        if not isinstance(label_schema, LabelSchema):
            raise TypeError('label_schema must be an instance of a LabelSchema.')
        self._label_schema = label_schema
        # the parent constructor sets annotations, which validates against the
        # schema (set above)
        FileAnnotationCollection.__init__(
            self, version=version, annotations=annotations, image_file_name=image_file_name,
            image_id=image_id, core_name=core_name)

    @property
    def label_schema(self):
        """
        The label schema.
        Returns
        -------
        LabelSchema
        """
        return self._label_schema

    @property
    def annotations(self):
        """
        The annotations.
        Returns
        -------
        LabelCollection
        """
        return self._annotations

    @annotations.setter
    def annotations(self, annotations):
        # type: (Union[None, LabelCollection, dict]) -> None
        if annotations is None:
            self._annotations = None
            return
        if isinstance(annotations, LabelCollection):
            self._annotations = annotations
        elif isinstance(annotations, dict):
            self._annotations = LabelCollection.from_dict(annotations)
        else:
            raise TypeError(
                'annotations must be an LabelCollection. Got type {}'.format(type(annotations)))
        # non-strict: schema violations are logged, not raised
        self.validate_annotations(strict=False)

    def add_annotation(self, annotation, validate_confidence=True, validate_geometry=True):
        """
        Add an annotation, with a check for valid values in confidence and geometry type.
        Parameters
        ----------
        annotation : LabelFeature
            The prospective annotation.
        validate_confidence : bool
            Should we check that all confidence values follow the schema?
        validate_geometry : bool
            Should we check that all geometries are of allowed type?
        Returns
        -------
        None
        """
        if not isinstance(annotation, LabelFeature):
            raise TypeError('This requires an LabelFeature instance. Got {}'.format(type(annotation)))
        if self._annotations is None:
            self._annotations = LabelCollection()
        # validate as requested before accepting the feature
        valid = True
        if validate_confidence:
            valid &= self._valid_confidences(annotation)
        if validate_geometry:
            valid &= self._valid_geometry(annotation)
        if not valid:
            raise ValueError('LabelFeature does not follow the schema.')
        self._annotations.add_feature(annotation)

    def is_annotation_valid(self, annotation):
        """
        Is the given annotation valid according to the schema?
        Parameters
        ----------
        annotation : LabelFeature
        Returns
        -------
        bool
        """
        if not isinstance(annotation, LabelFeature):
            return False
        if self._label_schema is None:
            return True
        valid = self._valid_confidences(annotation)
        valid &= self._valid_geometry(annotation)
        return valid

    def _valid_confidences(self, annotation):
        # check every label entry's confidence value against the schema;
        # invalid values are logged and render the annotation invalid
        if self._label_schema is None:
            return True
        if annotation.properties is None or annotation.properties.parameters is None:
            return True
        valid = True
        for entry in annotation.properties.parameters:
            if not self._label_schema.is_valid_confidence(entry.confidence):
                valid = False
                logger.error('Invalid confidence value {}'.format(entry.confidence))
        return valid

    def _valid_geometry(self, annotation):
        # check the annotation geometry type against the schema's permitted set
        if self._label_schema is None:
            return True
        if not self._label_schema.is_valid_geometry(annotation.geometry):
            logger.error('Invalid geometry type {}'.format(type(annotation.geometry)))
            return False
        return True

    def validate_annotations(self, strict=True):
        # validates every contained annotation; raises in strict mode
        if self._annotations is None:
            return True
        valid = True
        for entry in self._annotations:
            valid &= self.is_annotation_valid(entry)
        if strict and not valid:
            raise ValueError('Some annotation does not follow the schema.')
        return valid

    @classmethod
    def from_file(cls, file_name):
        """
        Read from (json) file.
        Parameters
        ----------
        file_name : str
        Returns
        -------
        FileLabelCollection
        """
        with open(file_name, 'r') as fi:
            the_dict = json.load(fi)
        return cls.from_dict(the_dict)

    @classmethod
    def from_dict(cls, the_dict):
        """
        Define from a dictionary representation.
        Parameters
        ----------
        the_dict : dict
        Returns
        -------
        FileLabelCollection
        """
        if not isinstance(the_dict, dict):
            raise TypeError('This requires a dict. Got type {}'.format(type(the_dict)))
        if 'label_schema' not in the_dict:
            raise KeyError('this dictionary must contain a label_schema')
        typ = the_dict.get('type', 'NONE')
        if typ != cls._type:
            raise ValueError('FileLabelCollection cannot be constructed from the input dictionary')
        return cls(
            the_dict['label_schema'],
            version=the_dict.get('version', 'UNKNOWN'),
            annotations=the_dict.get('annotations', None),
            image_file_name=the_dict.get('image_file_name', None),
            image_id=the_dict.get('image_id', None),
            core_name=the_dict.get('core_name', None))

    def to_dict(self, parent_dict=None):
        # serialize, omitting the optional entries which are unset
        if parent_dict is None:
            parent_dict = OrderedDict()
        parent_dict['type'] = self.type
        parent_dict['version'] = self.version
        parent_dict['label_schema'] = self.label_schema.to_dict()
        if self.image_file_name is not None:
            parent_dict['image_file_name'] = self.image_file_name
        if self.image_id is not None:
            parent_dict['image_id'] = self.image_id
        if self.core_name is not None:
            parent_dict['core_name'] = self.core_name
        if self.annotations is not None:
            parent_dict['annotations'] = self.annotations.to_dict()
        return parent_dict
| 45,023 | 30.463312 | 111 | py |
sarpy | sarpy-master/sarpy/annotation/__init__.py |
__classification__ = "UNCLASSIFIED"
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_schema/__init__.py | """
This package contains the AFRL RDE schema
"""
__classification__ = 'UNCLASSIFIED'
import pkg_resources
def get_schema_path(version='1.0.0'):
    """
    Location of AFRL/RDE schema file.

    Parameters
    ----------
    version : str

    Returns
    -------
    str
        The path to the ARFL/RDE schema.
    """

    # guard clause - only version 1.0.0 is known
    if version != '1.0.0':
        raise ValueError('Got unrecognized version {}'.format(version))
    return pkg_resources.resource_filename(
        'sarpy.annotation.afrl_rde_schema', 'afrl_rde_schema_v1.0.0_2022-02-15.xsd')
| 524 | 20 | 88 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/SensorInfo.py | """
Definition for the SensorInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional
import numpy
from sarpy.io.xml.base import Serializable, Arrayable
from sarpy.io.xml.descriptors import SerializableDescriptor, StringDescriptor, \
StringEnumDescriptor, FloatDescriptor
from sarpy.io.complex.sicd_elements.blocks import XYZType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.geometry.geocoords import ecf_to_geodetic, ecf_to_ned
from .blocks import LatLonEleType
class BeamWidthType(Serializable, Arrayable):
    """
    The beam width, in azimuth and elevation.
    """

    _fields = ('Azimuth', 'Elevation')
    _required = _fields
    _numeric_format = {key: '0.17G' for key in _fields}
    # Descriptors
    Azimuth = FloatDescriptor(
        'Azimuth', _required, strict=True, docstring='The Azimuth attribute.')  # type: float
    Elevation = FloatDescriptor(
        'Elevation', _required, strict=True, docstring='The Elevation attribute.')  # type: float

    def __init__(self, Azimuth=None, Elevation=None, **kwargs):
        """
        Parameters
        ----------
        Azimuth : float
        Elevation : float
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Azimuth = Azimuth
        self.Elevation = Elevation
        super(BeamWidthType, self).__init__(**kwargs)

    def get_array(self, dtype='float64'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
            array of the form [Azimuth, Elevation]
        """

        return numpy.array([self.Azimuth, self.Elevation], dtype=dtype)

    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Azimuth, Elevation]

        Returns
        -------
        BeamWidthType
        """

        # guard clauses replace the original nested conditional
        if array is None:
            return None
        if not isinstance(array, (numpy.ndarray, list, tuple)):
            raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
        if len(array) < 2:
            raise ValueError('Expected array to be of length 2, and received {}'.format(array))
        return cls(Azimuth=array[0], Elevation=array[1])
class SquintAngleType(Serializable):
    """
    Squint angle of the collection, given in both the ground and slant planes.
    """

    _fields = ('GroundPlane', 'SlantPlane')
    _required = _fields
    _numeric_format = {el: '0.17G' for el in _fields}
    # descriptor
    GroundPlane = FloatDescriptor(
        'GroundPlane', _required,
        docstring='Measured angle between the sensor line-of-sight and the '
                  'lateral axis of the aircraft as projected into the'
                  'ground plane')  # type: float
    SlantPlane = FloatDescriptor(
        'SlantPlane', _required,
        docstring='Measured angle between the sensor line-of-sight and the '
                  'lateral axis of the aircraft as projected into the'
                  'slant plane')  # type: float

    def __init__(self, GroundPlane=None, SlantPlane=None, **kwargs):
        """
        Parameters
        ----------
        GroundPlane : float
        SlantPlane : float
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.GroundPlane = GroundPlane
        self.SlantPlane = SlantPlane
        super(SquintAngleType, self).__init__(**kwargs)
class AircraftLocationType(Serializable, Arrayable):
    """A three-dimensional geographic point in WGS-84 coordinates."""

    _fields = ('Lat', 'Lon', 'Altitude')
    _required = _fields
    _numeric_format = {'Lat': '0.17G', 'Lon': '0.17G', 'Altitude': '0.17G'}
    # descriptors
    Lat = FloatDescriptor(
        'Lat', _required, strict=True,
        docstring='The latitude attribute. Assumed to be WGS-84 coordinates.'
    )  # type: float
    Lon = FloatDescriptor(
        'Lon', _required, strict=True,
        docstring='The longitude attribute. Assumed to be WGS-84 coordinates.'
    )  # type: float
    Altitude = FloatDescriptor(
        'Altitude', _required, strict=True,
        docstring='The Height Above Ellipsoid (in meters) attribute. '
                  'Assumed to be WGS-84 coordinates.')  # type: float

    def __init__(self, Lat=None, Lon=None, Altitude=None, **kwargs):
        """
        Parameters
        ----------
        Lat : float
        Lon : float
        Altitude : float
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Lat, self.Lon, self.Altitude = Lat, Lon, Altitude
        super(AircraftLocationType, self).__init__(**kwargs)

    def get_array(self, dtype=numpy.float64):
        """
        Render this point as a numpy array of the form [Lat, Lon, Altitude].

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            data type of the return

        Returns
        -------
        numpy.ndarray
        """

        return numpy.array([self.Lat, self.Lon, self.Altitude], dtype=dtype)

    @classmethod
    def from_array(cls, array):
        """
        Construct from an array-like entry, assumed to be [Lat, Lon, Altitude].

        Parameters
        ----------
        array : numpy.ndarray|list|tuple

        Returns
        -------
        AircraftLocationType
        """

        if array is None:
            return None
        if not isinstance(array, (numpy.ndarray, list, tuple)):
            raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
        if len(array) < 3:
            raise ValueError('Expected array to be of length 3, and received {}'.format(array))
        return cls(Lat=array[0], Lon=array[1], Altitude=array[2])
class SensorInfoType(Serializable):
    """
    General information about the sensor, its operation, and the collection
    geometry, in sensor-relative terms.
    """

    _fields = (
        'Name', 'SensorMfg', 'OperatingAgency', 'Type', 'Mode', 'Band',
        'Bandwidth', 'CenterFrequency', 'NearRange', 'SlantRangeSwathWidth',
        'Polarization', 'Range', 'DepressionAngle', 'LinearDynamicRange',
        'BeamWidth', 'Aimpoint', 'AircraftHeading', 'AircraftTrackAngle', 'Look', 'SquintAngle',
        'AircraftLocation', 'AircraftVelocity', 'FlightNumber', 'PassNumber')
    _required = (
        'Name', 'Type', 'Band', 'Bandwidth', 'CenterFrequency', 'Polarization', 'Range',
        'DepressionAngle', 'Aimpoint', 'AircraftHeading', 'AircraftTrackAngle',
        'Look', 'SquintAngle', 'AircraftLocation', 'AircraftVelocity')
    _numeric_format = {
        'Bandwidth': '0.17G', 'CenterFrequency': '0.17G', 'NearRange': '0.17G',
        'SlantRangeSwathWidth': '0.17G', 'Range': '0.17G', 'DepressionAngle': '0.17G',
        'LinearDynamicRange': '0.17G', 'AircraftHeading': '0.17G', 'AircraftTrackAngle': '0.17G', }
    # descriptors
    Name = StringDescriptor(
        'Name', _required,
        docstring='The name of the sensor')  # type: str
    SensorMfg = StringDescriptor(
        'SensorMfg', _required,
        docstring='The manufacturer of the sensor')  # type: Optional[str]
    OperatingAgency = StringDescriptor(
        'OperatingAgency', _required,
        docstring='The agency or company that operates the sensor')  # type: Optional[str]
    Type = StringDescriptor(
        'Type', _required,
        docstring='The type of sensor (i.e SAR or EO)')  # type: str
    Mode = StringDescriptor(
        'Mode', _required,
        docstring='Sensor operating mode')  # type: Optional[str]
    Band = StringDescriptor(
        'Band', _required,
        docstring='designation of the sensor frequency band')  # type: str
    Bandwidth = FloatDescriptor(
        'Bandwidth', _required,
        docstring='Radio Frequency bandwidth of the sensor system in GHz')  # type: float
    CenterFrequency = FloatDescriptor(
        'CenterFrequency', _required,
        docstring='Center operating frequency of the sensor system in GHz')  # type: float
    NearRange = FloatDescriptor(
        'NearRange', _required,
        docstring='The slant range distance measured from the sensor to the '
                  'near range of the image')  # type: Optional[float]
    SlantRangeSwathWidth = FloatDescriptor(
        'SlantRangeSwathWidth', _required,
        docstring='The width of the image as measured in the slant range'
    )  # type: Optional[float]
    Polarization = StringDescriptor(
        'Polarization', _required,
        docstring='The polarization of the transmitted/received signals')  # type: str
    Range = FloatDescriptor(
        'Range', _required,
        docstring='Measured slant range between the sensor aperture '
                  'and the scene center')  # type: float
    DepressionAngle = FloatDescriptor(
        'DepressionAngle', _required,
        docstring='Measured depression angle between the sensor line-of-sight '
                  'and the local horizontal reference plane')  # type: float
    LinearDynamicRange = FloatDescriptor(
        'LinearDynamicRange', _required,
        docstring="The span of the signal amplitudes (or power levels) over "
                  "which the system's response is linear. Typically the ratio "
                  "of the largest input signal that causes a 1 db compression "
                  "in receiver dynamic gain and the minimum signal defined by "
                  "receiver noise.")  # type: Optional[float]
    BeamWidth = SerializableDescriptor(
        'BeamWidth', BeamWidthType, _required,
        docstring='The width of the radar beam at its half power'
    )  # type: Optional[BeamWidthType]
    Aimpoint = SerializableDescriptor(
        'Aimpoint', LatLonEleType, _required,
        docstring='The sensor aim point')  # type: LatLonEleType
    AircraftHeading = FloatDescriptor(
        'AircraftHeading', _required,
        docstring='Aircraft heading relative to True North, in degrees'
    )  # type: float
    AircraftTrackAngle = FloatDescriptor(
        'AircraftTrackAngle', _required,
        docstring='The bearing from the aircraft position at the first pulse '
                  'to the aircraft position at the last')  # type: float
    Look = StringEnumDescriptor(
        'Look', {'Left', 'Right', 'Nadir'}, _required,
        docstring='Direction of the sensor look angle relative to aircraft '
                  'motion')  # type: str
    SquintAngle = SerializableDescriptor(
        'SquintAngle', SquintAngleType, _required,
        docstring='Measured angle between the sensor line-of-sight and the '
                  'lateral axis of the aircraft')  # type: SquintAngleType
    AircraftLocation = SerializableDescriptor(
        'AircraftLocation', AircraftLocationType, _required,
        docstring='The aircraft location (at scene center COA time?)')  # type: AircraftLocationType
    AircraftVelocity = SerializableDescriptor(
        'AircraftVelocity', XYZType, _required,
        docstring='Aircraft velocity in ECEF coordinates (at scene center COA time?)')  # type: XYZType
    FlightNumber = StringDescriptor(
        'FlightNumber', _required,
        docstring='The aircraft flight number')  # type: Optional[str]
    PassNumber = StringDescriptor(
        'PassNumber', _required,
        docstring='The aircraft pass number')  # type: Optional[str]

    def __init__(self, Name=None, SensorMfg=None, OperatingAgency=None,
                 Type=None, Mode=None, Band=None, Bandwidth=None,
                 CenterFrequency=None, NearRange=None, SlantRangeSwathWidth=None,
                 Polarization=None, Range=None, DepressionAngle=None,
                 LinearDynamicRange=None, BeamWidth=None, Aimpoint=None,
                 AircraftHeading=None, AircraftTrackAngle=None,
                 Look=None, SquintAngle=None,
                 AircraftLocation=None, AircraftVelocity=None,
                 FlightNumber=None, PassNumber=None, **kwargs):
        """
        Parameters
        ----------
        Name : None|str
        SensorMfg : None|str
        OperatingAgency : None|str
        Type : str
        Mode : None|str
        Band : None|str
        Bandwidth : None|float
        CenterFrequency : None|float
        NearRange : None|float
        SlantRangeSwathWidth : None|float
        Polarization : None|str
        Range : float
        DepressionAngle : float
        LinearDynamicRange : None|float
        BeamWidth : BeamWidthType
        Aimpoint : LatLonEleType|numpy.ndarray|list|tuple
        AircraftHeading : None|float
        AircraftTrackAngle : None|float
        Look : str
        SquintAngle : SquintAngleType
        AircraftLocation : AircraftLocationType|numpy.ndarray|list|tuple
        AircraftVelocity : XYZType|numpy.ndarray|list|tuple
        FlightNumber : None|int
        PassNumber : None|int
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Name = Name
        self.SensorMfg = SensorMfg
        self.OperatingAgency = OperatingAgency
        self.Type = Type
        self.Mode = Mode
        self.Band = Band
        self.Bandwidth = Bandwidth
        self.CenterFrequency = CenterFrequency
        self.NearRange = NearRange
        self.SlantRangeSwathWidth = SlantRangeSwathWidth
        self.Polarization = Polarization
        self.Range = Range
        self.DepressionAngle = DepressionAngle
        self.LinearDynamicRange = LinearDynamicRange
        self.BeamWidth = BeamWidth
        self.Aimpoint = Aimpoint
        self.AircraftHeading = AircraftHeading
        self.AircraftTrackAngle = AircraftTrackAngle
        self.Look = Look
        self.SquintAngle = SquintAngle
        self.AircraftLocation = AircraftLocation
        self.AircraftVelocity = AircraftVelocity
        self.FlightNumber = FlightNumber
        self.PassNumber = PassNumber
        super(SensorInfoType, self).__init__(**kwargs)

    @classmethod
    def from_sicd(cls, sicd):
        """
        Construct the sensor info from a sicd structure

        Parameters
        ----------
        sicd : SICDType

        Returns
        -------
        SensorInfoType
        """

        transmit_freq_proc = sicd.ImageFormation.TxFrequencyProc
        # convert Hz to GHz, matching the CenterFrequency/Bandwidth descriptor units
        center_freq = transmit_freq_proc.center_frequency*1e-9
        bandwidth = transmit_freq_proc.bandwidth*1e-9
        polarization = sicd.ImageFormation.get_polarization().replace(':', '')
        look = 'Left' if sicd.SCPCOA.SideOfTrack == 'L' else 'Right'
        arp_pos_llh = ecf_to_geodetic(sicd.SCPCOA.ARPPos.get_array())
        # calculate heading - angle (degrees) of the NED-projected velocity from North
        heading_ned = ecf_to_ned(sicd.SCPCOA.ARPVel.get_array(), sicd.SCPCOA.ARPPos.get_array(), absolute_coords=False)
        heading = numpy.rad2deg(numpy.arctan2(heading_ned[1], heading_ned[0]))
        # calculate track angle - bearing from the first to the last aperture position
        first_pos_ecf = sicd.Position.ARPPoly(0)
        last_pos_ecf = sicd.Position.ARPPoly(sicd.Timeline.CollectDuration)
        diff_ned = ecf_to_ned(last_pos_ecf - first_pos_ecf, sicd.SCPCOA.ARPPos.get_array(), absolute_coords=False)
        track_angle = numpy.rad2deg(numpy.arctan2(diff_ned[1], diff_ned[0]))
        # NOTE(review): DepressionAngle is populated from SCPCOA.GrazeAng - graze and
        # depression agree only under a flat-earth approximation; confirm intended convention
        return SensorInfoType(
            Name=sicd.CollectionInfo.CollectorName,
            Type='SAR',
            Mode=sicd.CollectionInfo.RadarMode.ModeType,
            Band=sicd.ImageFormation.get_transmit_band_name(),
            Bandwidth=bandwidth,
            CenterFrequency=center_freq,
            Polarization=polarization,
            Range=sicd.SCPCOA.SlantRange,
            DepressionAngle=sicd.SCPCOA.GrazeAng,
            Aimpoint=sicd.GeoData.SCP.LLH.get_array(),
            AircraftHeading=heading,
            AircraftTrackAngle=track_angle,
            Look=look,
            SquintAngle=SquintAngleType(
                SlantPlane=sicd.SCPCOA.DopplerConeAng,
                GroundPlane=sicd.SCPCOA.Squint),
            AircraftLocation=arp_pos_llh,
            AircraftVelocity=sicd.SCPCOA.ARPVel.get_array())
| 16,613 | 38.276596 | 119 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/base.py | """
Common definition for NGA modified RDE/AFRL labeling definition
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
DEFAULT_STRICT = False
| 167 | 15.8 | 63 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/CollectionInfo.py | """
Definition for the CollectionInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import IntegerDescriptor, SerializableDescriptor, \
StringDescriptor, FloatDescriptor
from .base import DEFAULT_STRICT
from .blocks import DateTimeRangeType
class LocationType(Serializable):
    """The general geographic location of the data collection."""

    _fields = ('Lat', 'Lon', 'Name')
    _required = ('Lat', 'Lon')
    # descriptors
    Lat = FloatDescriptor(
        'Lat', _required, strict=DEFAULT_STRICT,
        docstring="General latitude of the data collection.")  # type: float
    Lon = FloatDescriptor(
        'Lon', _required, strict=DEFAULT_STRICT,
        docstring="General longitude of the data collection.")  # type: float
    Name = StringDescriptor(
        'Name', _required,
        docstring="Common name of the collection location.")  # type: Optional[str]

    def __init__(self, Lat=None, Lon=None, Name=None, **kwargs):
        """
        Parameters
        ----------
        Lat : float
        Lon : float
        Name : None|str
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Lat, self.Lon = Lat, Lon
        self.Name = Name
        super(LocationType, self).__init__(**kwargs)
class CollectionInfoType(Serializable):
    """
    General information about the data collection.
    """

    _fields = (
        'Name', 'ProgramName', 'Sponsor', 'Date', 'Location', 'NumberOfSites')
    _required = ('Date', )
    # descriptors
    Name = StringDescriptor(
        'Name', _required,
        docstring="Name of the collection.")  # type: Optional[str]
    # NOTE: the descriptor's bound name was previously mistyped as
    # 'StringDescriptor', which broke attribute binding/serialization for this field
    ProgramName = StringDescriptor(
        'ProgramName', _required,
        docstring="Name of the program that collected the data.")  # type: Optional[str]
    Sponsor = StringDescriptor(
        'Sponsor', _required,
        docstring="Sponsoring agency/organization of the data collection.")  # type: Optional[str]
    Date = SerializableDescriptor(
        'Date', DateTimeRangeType, _required, strict=DEFAULT_STRICT,
        docstring="Begin and end dates of the data collection.")  # type: Optional[DateTimeRangeType]
    Location = SerializableDescriptor(
        'Location', LocationType, _required, strict=DEFAULT_STRICT,
        docstring="General location of the data collection.")  # type: Optional[LocationType]
    NumberOfSites = IntegerDescriptor(
        'NumberOfSites', _required, strict=DEFAULT_STRICT,
        docstring="Number of different sites contained in the data collection.")  # type: Optional[int]

    def __init__(self, Name=None, ProgramName=None, Sponsor=None, Date=None,
                 Location=None, NumberOfSites=None, **kwargs):
        """
        Parameters
        ----------
        Name : None|str
        ProgramName : None|str
        Sponsor : None|str
        Date : None|DateTimeRangeType|list|tuple
        Location : None|LocationType
        NumberOfSites : None|int
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Name = Name
        self.ProgramName = ProgramName
        self.Sponsor = Sponsor
        self.Date = Date
        self.Location = Location
        self.NumberOfSites = NumberOfSites
        super(CollectionInfoType, self).__init__(**kwargs)
| 3,637 | 33.980769 | 103 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/ObjectInfo.py | """
Definition for the ObjectInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
import logging
from typing import Optional, List
import numpy
from sarpy.io.xml.base import Serializable, Arrayable, create_text_node, \
get_node_value
from sarpy.io.xml.descriptors import StringDescriptor, FloatDescriptor, \
IntegerDescriptor, SerializableDescriptor, SerializableListDescriptor
from sarpy.io.complex.sicd_elements.blocks import RowColType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType
from sarpy.geometry.geocoords import geodetic_to_ecf, ecf_to_geodetic, wgs_84_norm
from sarpy.geometry.geometry_elements import Point, Polygon, GeometryCollection, Geometry
from sarpy.annotation.base import GeometryProperties
from .base import DEFAULT_STRICT
from .blocks import RangeCrossRangeType, RowColDoubleType, LatLonEleType, \
ProjectionPerturbationType, LabelSourceType
logger = logging.getLogger(__name__)
_no_projection_text = 'This sicd does not permit projection,\n\t' \
'so the image location can not be inferred'
# the Object and sub-component definitions
class PhysicalType(Serializable):
    """Chip size and center pixel of the physical object in a given plane."""

    _fields = ('ChipSize', 'CenterPixel')
    _required = _fields
    ChipSize = SerializableDescriptor(
        'ChipSize', RangeCrossRangeType, _required, strict=DEFAULT_STRICT,
        docstring='The chip size of the physical object, '
                  'in the appropriate plane')  # type: RangeCrossRangeType
    CenterPixel = SerializableDescriptor(
        'CenterPixel', RowColDoubleType, _required, strict=DEFAULT_STRICT,
        docstring='The center pixel of the physical object, '
                  'in the appropriate plane')  # type: RowColDoubleType

    def __init__(self, ChipSize=None, CenterPixel=None, **kwargs):
        """
        Parameters
        ----------
        ChipSize : RangeCrossRangeType|numpy.ndarray|list|tuple
        CenterPixel : RowColDoubleType|numpy.ndarray|list|tuple
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.ChipSize, self.CenterPixel = ChipSize, CenterPixel
        super(PhysicalType, self).__init__(**kwargs)

    @classmethod
    def from_ranges(cls, row_range, col_range, row_limit, col_limit):
        """
        Construct from the row/column ranges, clipped to the given limits.

        Parameters
        ----------
        row_range
        col_range
        row_limit
        col_limit

        Returns
        -------
        PhysicalType
        """

        # clip each range to [0, limit] before deriving the extent and center
        row_start = max(0, row_range[0])
        row_end = min(row_limit, row_range[1])
        col_start = max(0, col_range[0])
        col_end = min(col_limit, col_range[1])
        return PhysicalType(
            ChipSize=(row_end - row_start, col_end - col_start),
            CenterPixel=(0.5*(row_end + row_start), 0.5*(col_end + col_start)))
class PlanePhysicalType(Serializable):
    """
    Chip details for the physical object in a given plane, both with and
    without shadows included.
    """

    _fields = (
        'Physical', 'PhysicalWithShadows')
    _required = _fields
    Physical = SerializableDescriptor(
        'Physical', PhysicalType, _required,
        docstring='Chip details for the physical object in the appropriate plane')  # type: PhysicalType
    PhysicalWithShadows = SerializableDescriptor(
        'PhysicalWithShadows', PhysicalType, _required,
        docstring='Chip details for the physical object including shadows in '
                  'the appropriate plane')  # type: PhysicalType

    def __init__(self, Physical=None, PhysicalWithShadows=None, **kwargs):
        """
        Parameters
        ----------
        Physical : PhysicalType
        PhysicalWithShadows : PhysicalType
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Physical = Physical
        self.PhysicalWithShadows = PhysicalWithShadows
        super(PlanePhysicalType, self).__init__(**kwargs)
class SizeType(Serializable, Arrayable):
    """Length/width/height extent of an object."""

    _fields = ('Length', 'Width', 'Height')
    _required = _fields
    _numeric_format = {fld: '0.17G' for fld in _fields}
    # Descriptors
    Length = FloatDescriptor(
        'Length', _required, strict=True, docstring='The Length attribute.')  # type: float
    Width = FloatDescriptor(
        'Width', _required, strict=True, docstring='The Width attribute.')  # type: float
    Height = FloatDescriptor(
        'Height', _required, strict=True, docstring='The Height attribute.')  # type: float

    def __init__(self, Length=None, Width=None, Height=None, **kwargs):
        """
        Parameters
        ----------
        Length : float
        Width : float
        Height : float
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Length = Length
        self.Width = Width
        self.Height = Height
        super(SizeType, self).__init__(**kwargs)

    def get_max_diameter(self):
        """
        Gets the nominal maximum diameter for the item, in meters.

        Returns
        -------
        float
        """

        # diagonal of the length/width footprint
        squared_extent = self.Length*self.Length + self.Width*self.Width
        return float(numpy.sqrt(squared_extent))

    def get_array(self, dtype='float64'):
        """
        Render this instance as a numpy array of the form [Length, Width, Height].

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
        """

        return numpy.array([self.Length, self.Width, self.Height], dtype=dtype)

    @classmethod
    def from_array(cls, array):
        """
        Construct from an array-like entry, assumed to be [Length, Width, Height].

        Parameters
        ----------
        array : numpy.ndarray|list|tuple

        Returns
        -------
        SizeType
        """

        if array is None:
            return None
        if not isinstance(array, (numpy.ndarray, list, tuple)):
            raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
        if len(array) < 3:
            raise ValueError('Expected array to be of length 3, and received {}'.format(array))
        return cls(Length=array[0], Width=array[1], Height=array[2])
class OrientationType(Serializable):
    """
    Orientation of the object - roll/pitch/yaw and azimuth angle.
    All components are optional (note the empty `_required` tuple).
    """

    _fields = ('Roll', 'Pitch', 'Yaw', 'AzimuthAngle')
    _required = ()
    _numeric_format = {key: '0.17G' for key in _fields}
    # descriptors
    Roll = FloatDescriptor(
        'Roll', _required)  # type: float
    Pitch = FloatDescriptor(
        'Pitch', _required)  # type: float
    Yaw = FloatDescriptor(
        'Yaw', _required)  # type: float
    AzimuthAngle = FloatDescriptor(
        'AzimuthAngle', _required)  # type: float

    def __init__(self, Roll=None, Pitch=None, Yaw=None, AzimuthAngle=None, **kwargs):
        """
        Parameters
        ----------
        Roll : float
        Pitch : float
        Yaw : float
        AzimuthAngle : float
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Roll = Roll
        self.Pitch = Pitch
        self.Yaw = Yaw
        self.AzimuthAngle = AzimuthAngle
        super(OrientationType, self).__init__(**kwargs)
class ImageLocationType(Serializable):
    """
    Object location in image (pixel) coordinates - a center pixel and four
    optional corner pixels. Coordinates are relative to the given image chip,
    excluding any SICD ImageData.FirstRow/FirstCol shift (see the shift
    handling in :meth:`from_geolocation`).
    """

    _fields = (
        'CenterPixel', 'LeftFrontPixel', 'RightFrontPixel', 'RightRearPixel',
        'LeftRearPixel')
    _required = ('CenterPixel', )
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType
    LeftFrontPixel = SerializableDescriptor(
        'LeftFrontPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[RowColType]
    RightFrontPixel = SerializableDescriptor(
        'RightFrontPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[RowColType]
    RightRearPixel = SerializableDescriptor(
        'RightRearPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[RowColType]
    LeftRearPixel = SerializableDescriptor(
        'LeftRearPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[RowColType]

    def __init__(self, CenterPixel=None, LeftFrontPixel=None, RightFrontPixel=None,
                 RightRearPixel=None, LeftRearPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : RowColType|numpy.ndarray|list|tuple
        LeftFrontPixel : RowColType|numpy.ndarray|list|tuple
        RightFrontPixel : RowColType|numpy.ndarray|list|tuple
        RightRearPixel : RowColType|numpy.ndarray|list|tuple
        LeftRearPixel : RowColType|numpy.ndarray|list|tuple
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.CenterPixel = CenterPixel
        self.LeftFrontPixel = LeftFrontPixel
        self.RightFrontPixel = RightFrontPixel
        self.RightRearPixel = RightRearPixel
        self.LeftRearPixel = LeftRearPixel
        super(ImageLocationType, self).__init__(**kwargs)

    @classmethod
    def from_geolocation(cls, geo_location, the_structure):
        """
        Construct the image location from the geographical location via
        projection using the SICD model.

        Parameters
        ----------
        geo_location : GeoLocationType
        the_structure : SICDType|SIDDType

        Returns
        -------
        None|ImageLocationType
            None if projection fails, the value otherwise
        """

        if geo_location is None:
            return None

        if not the_structure.can_project_coordinates():
            logger.warning(_no_projection_text)
            return None

        # make sure this is defined, for the sake of efficiency
        the_structure.define_coa_projection(override=False)

        kwargs = {}
        # for SICD, projected coordinates are absolute - shift them back to chip-relative
        if isinstance(the_structure, SICDType):
            image_shift = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            image_shift = numpy.zeros((2, ), dtype='float64')

        # project each populated field (including CenterPixel) individually
        for attribute in cls._fields:
            value = getattr(geo_location, attribute)
            if value is not None:
                absolute_pixel_location, _, _ = the_structure.project_ground_to_image_geo(
                    value.get_array(dtype='float64'), ordering='latlong')
                if numpy.any(numpy.isnan(absolute_pixel_location)):
                    return None
                kwargs[attribute] = absolute_pixel_location - image_shift

        out = ImageLocationType(**kwargs)
        out.infer_center_pixel()
        return out

    def infer_center_pixel(self):
        """
        Infer the center pixel, if not populated, as the average of the four
        corner pixels (all four must be populated).

        Returns
        -------
        None
        """

        if self.CenterPixel is not None:
            return

        current = numpy.zeros((2, ), dtype='float64')
        for entry in self._fields:
            if entry == 'CenterPixel':
                continue
            value = getattr(self, entry)
            if value is None:
                # cannot infer unless all four corners are present
                return
            current += 0.25*value.get_array(dtype='float64')
        self.CenterPixel = RowColType.from_array(current)

    def get_nominal_box(self, row_length=10, col_length=10):
        """
        Get a nominal box containing the object, using the default side length if necessary.

        Parameters
        ----------
        row_length : int|float
            The side length to use for the rectangle, if not defined.
        col_length : int|float
            The side length to use for the rectangle, if not defined.

        Returns
        -------
        None|numpy.ndarray
        """

        # prefer the actual four corners, when all are populated
        if self.LeftFrontPixel is not None and self.RightFrontPixel is not None and \
                self.LeftRearPixel is not None and self.RightRearPixel is not None:
            out = numpy.zeros((4, 2), dtype='float64')
            out[0, :] = self.LeftFrontPixel.get_array()
            out[1, :] = self.RightFrontPixel.get_array()
            out[2, :] = self.RightRearPixel.get_array()
            out[3, :] = self.LeftRearPixel.get_array()
            return out

        if self.CenterPixel is None:
            return None

        # fall back to a rectangle of the given side lengths, centered at CenterPixel
        shift = numpy.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype='float64')
        shift[:, 0] *= row_length
        shift[:, 1] *= col_length
        return self.CenterPixel.get_array(dtype='float64') + shift

    def get_geometry_object(self):
        """
        Gets the geometry for the given image section - a center Point and/or a
        corner Polygon, depending on which fields are populated.

        Returns
        -------
        geometry : None|Point|GeometryCollection
        geometry_properties : None|List[GeometryProperties]
        """

        geometries = []
        geometry_properties = []
        if self.CenterPixel is not None:
            geometries.append(Point(coordinates=self.CenterPixel.get_array(dtype='float64')))
            geometry_properties.append(GeometryProperties(name='CenterPixel', color='blue'))
        if self.LeftFrontPixel is not None and \
                self.RightFrontPixel is not None and \
                self.RightRearPixel is not None and \
                self.LeftRearPixel is not None:
            # corner ring, traversed LeftFront -> RightFront -> RightRear -> LeftRear
            ring = numpy.zeros((4, 2), dtype='float64')
            ring[0, :] = self.LeftFrontPixel.get_array(dtype='float64')
            ring[1, :] = self.RightFrontPixel.get_array(dtype='float64')
            ring[2, :] = self.RightRearPixel.get_array(dtype='float64')
            ring[3, :] = self.LeftRearPixel.get_array(dtype='float64')
            geometries.append(Polygon(coordinates=[ring, ]))
            geometry_properties.append(GeometryProperties(name='Polygon', color='green'))
        if len(geometries) == 0:
            return None, None
        elif len(geometries) == 1:
            return geometries[0], geometry_properties
        else:
            return GeometryCollection(geometries=geometries), geometry_properties
class GeoLocationType(Serializable):
    """
    Object location in geographic (Lat/Lon/Ele) coordinates - a center point
    and four optional corner points, mirroring :class:`ImageLocationType`.
    """

    _fields = (
        'CenterPixel', 'LeftFrontPixel', 'RightFrontPixel', 'RightRearPixel',
        'LeftRearPixel')
    _required = ('CenterPixel', )
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType
    LeftFrontPixel = SerializableDescriptor(
        'LeftFrontPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[LatLonEleType]
    RightFrontPixel = SerializableDescriptor(
        'RightFrontPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[LatLonEleType]
    RightRearPixel = SerializableDescriptor(
        'RightRearPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[LatLonEleType]
    LeftRearPixel = SerializableDescriptor(
        'LeftRearPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[LatLonEleType]

    def __init__(self, CenterPixel=None, LeftFrontPixel=None, RightFrontPixel=None,
                 RightRearPixel=None, LeftRearPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : LatLonEleType|numpy.ndarray|list|tuple
        LeftFrontPixel : None|LatLonEleType|numpy.ndarray|list|tuple
        RightFrontPixel : None|LatLonEleType|numpy.ndarray|list|tuple
        RightRearPixel : None|LatLonEleType|numpy.ndarray|list|tuple
        LeftRearPixel : None|LatLonEleType|numpy.ndarray|list|tuple
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.CenterPixel = CenterPixel
        self.LeftFrontPixel = LeftFrontPixel
        self.RightFrontPixel = RightFrontPixel
        self.RightRearPixel = RightRearPixel
        self.LeftRearPixel = LeftRearPixel
        super(GeoLocationType, self).__init__(**kwargs)

    # noinspection PyUnusedLocal
    @classmethod
    def from_image_location(cls, image_location, the_structure, projection_type='HAE', **proj_kwargs):
        """
        Construct the geographical location from the image location via
        projection using the SICD model.

        .. Note::
            This assumes that the image coordinates are with respect to the given
            image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
            which will be added here.

        Parameters
        ----------
        image_location : ImageLocationType
        the_structure : SICDType|SIDDType
        projection_type : str
            The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
            requires configuration for the DEM pathway described in
            :func:`sarpy.geometry.point_projection.image_to_ground_dem`.
        proj_kwargs
            The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.

        Returns
        -------
        None|GeoLocationType
            Coordinates may be populated as `NaN` if projection fails.
        """

        if image_location is None:
            return None

        if not the_structure.can_project_coordinates():
            logger.warning(_no_projection_text)
            return None

        # make sure this is defined, for the sake of efficiency
        the_structure.define_coa_projection(override=False)

        # for SICD, chip-relative pixel coordinates must be shifted to absolute first
        if isinstance(the_structure, SICDType):
            image_shift = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            image_shift = numpy.zeros((2, ), dtype='float64')

        kwargs = {}
        for attribute in cls._fields:
            value = getattr(image_location, attribute)
            if value is not None:
                coords = value.get_array(dtype='float64') + image_shift
                geo_coords = the_structure.project_image_to_ground_geo(
                    coords, ordering='latlong', projection_type=projection_type, **proj_kwargs)

                kwargs[attribute] = geo_coords
        out = GeoLocationType(**kwargs)
        out.infer_center_pixel()
        return out

    def infer_center_pixel(self):
        """
        Infer the center pixel, if not populated, by averaging the four corner
        points in ECF coordinates (all four must be populated).

        Returns
        -------
        None
        """

        if self.CenterPixel is not None:
            return

        current = numpy.zeros((3, ), dtype='float64')
        for entry in self._fields:
            if entry == 'CenterPixel':
                continue
            value = getattr(self, entry)
            if value is None:
                # cannot infer unless all four corners are present
                return
            # average in ECF space to avoid geodetic averaging artifacts
            current += 0.25*geodetic_to_ecf(value.get_array(dtype='float64'))
        self.CenterPixel = LatLonEleType.from_array(ecf_to_geodetic(current))
class StringWithComponentType(Serializable):
    """A string value with an optional 'Component' XML attribute."""

    _fields = ('Component', 'Value')
    _required = ('Value', )
    _set_as_attribute = ('Component', )
    Component = StringDescriptor(
        'Component', _required)  # type: str
    Value = StringDescriptor(
        'Value', _required)  # type: str

    def __init__(self, Component=None, Value=None, **kwargs):
        """
        Parameters
        ----------
        Component : str
        Value : str
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Component = Component
        self.Value = Value
        super(StringWithComponentType, self).__init__(**kwargs)

    @classmethod
    def from_node(cls, node, xml_ns, ns_key=None, kwargs=None):
        # the value is the node text; the component comes from the node attribute
        return cls(
            Value=get_node_value(node),
            Component=node.attrib.get('Component', None))

    def to_node(self, doc, tag, ns_key=None, parent=None, check_validity=False, strict=DEFAULT_STRICT, exclude=()):
        # prepend the namespace key to the tag, unless already present or 'default'
        if (ns_key is not None and ns_key != 'default') and not tag.startswith(ns_key + ':'):
            use_tag = '{}:{}'.format(ns_key, tag)
        else:
            use_tag = tag

        value = '' if self.Value is None else self.Value
        node = create_text_node(doc, use_tag, value, parent=parent)
        if self.Component is not None:
            node.attrib['Component'] = self.Component
        return node
class TheObjectType(Serializable):
    """
    Metadata for a single labeled object: identification/naming fields,
    physical size and orientation, chip (slant/ground plane) definitions,
    image and geographical locations, and contextual quality/terrain
    descriptions.
    """

    _fields = (
        'SystemName', 'SystemComponent', 'NATOName', 'Function', 'Version', 'DecoyType', 'SerialNumber',
        'ObjectClass', 'ObjectSubClass', 'ObjectTypeClass', 'ObjectType', 'ObjectLabel',
        'SlantPlane', 'GroundPlane', 'Size', 'Orientation',
        'Articulation', 'Configuration',
        'Accessories', 'PaintScheme', 'Camouflage', 'Obscuration', 'ObscurationPercent', 'ImageLevelObscuration',
        'ImageLocation', 'GeoLocation',
        'TargetToClutterRatio', 'VisualQualityMetric',
        'UnderlyingTerrain', 'OverlyingTerrain', 'TerrainTexture', 'SeasonalCover',
        'ProjectionPerturbation')
    _required = ('SystemName', 'ImageLocation', 'GeoLocation')
    _numeric_format = {'ObscurationPercent': '0.17G', }
    _collections_tags = {
        'Articulation': {'array': False, 'child_tag': 'Articulation'},
        'Configuration': {'array': False, 'child_tag': 'Configuration'}}
    # descriptors
    SystemName = StringDescriptor(
        'SystemName', _required, strict=DEFAULT_STRICT,
        docstring='Name of the object.')  # type: str
    SystemComponent = StringDescriptor(
        'SystemComponent', _required, strict=DEFAULT_STRICT,
        docstring='Name of the weapon system component.')  # type: Optional[str]
    NATOName = StringDescriptor(
        'NATOName', _required, strict=DEFAULT_STRICT,
        docstring='Name of the object in NATO naming convention.')  # type: Optional[str]
    Function = StringDescriptor(
        'Function', _required, strict=DEFAULT_STRICT,
        docstring='Function of the object.')  # type: Optional[str]
    Version = StringDescriptor(
        'Version', _required, strict=DEFAULT_STRICT,
        docstring='Version number of the object.')  # type: Optional[str]
    DecoyType = StringDescriptor(
        'DecoyType', _required, strict=DEFAULT_STRICT,
        docstring='Object is a decoy or surrogate.')  # type: Optional[str]
    SerialNumber = StringDescriptor(
        'SerialNumber', _required, strict=DEFAULT_STRICT,
        docstring='Serial number of the object.')  # type: Optional[str]
    # label elements
    ObjectClass = StringDescriptor(
        'ObjectClass', _required, strict=DEFAULT_STRICT,
        docstring='Top level class indicator; e.g., Aircraft, Ship, '
                  'Ground Vehicle, Missile Launcher, etc.')  # type: Optional[str]
    ObjectSubClass = StringDescriptor(
        'ObjectSubClass', _required, strict=DEFAULT_STRICT,
        docstring='Sub-class indicator; e.g., military, commercial')  # type: Optional[str]
    ObjectTypeClass = StringDescriptor(
        'ObjectTypeClass', _required, strict=DEFAULT_STRICT,
        docstring='Object type class indicator; e.g., '
                  'for Aircraft/Military - Propeller, Jet')  # type: Optional[str]
    ObjectType = StringDescriptor(
        'ObjectType', _required, strict=DEFAULT_STRICT,
        docstring='Object type indicator, e.g., '
                  'for Aircraft/Military/Jet - Bomber, Fighter')  # type: Optional[str]
    ObjectLabel = StringDescriptor(
        'ObjectLabel', _required, strict=DEFAULT_STRICT,
        docstring='Object label indicator, e.g., '
                  'for Bomber - Il-28, Tu-22M, Tu-160')  # type: Optional[str]
    SlantPlane = SerializableDescriptor(
        'SlantPlane', PlanePhysicalType, _required, strict=DEFAULT_STRICT,
        docstring='Object physical definition in the slant plane')  # type: Optional[PlanePhysicalType]
    GroundPlane = SerializableDescriptor(
        'GroundPlane', PlanePhysicalType, _required, strict=DEFAULT_STRICT,
        docstring='Object physical definition in the ground plane')  # type: Optional[PlanePhysicalType]
    # specific physical quantities
    Size = SerializableDescriptor(
        'Size', SizeType, _required, strict=DEFAULT_STRICT,
        docstring='The actual physical size of the object')  # type: Optional[SizeType]
    Orientation = SerializableDescriptor(
        'Orientation', OrientationType, _required, strict=DEFAULT_STRICT,
        docstring='The actual orientation size of the object')  # type: Optional[OrientationType]
    Articulation = SerializableListDescriptor(
        'Articulation', StringWithComponentType, _collections_tags, _required,
        docstring='')  # type: List[StringWithComponentType]
    Configuration = SerializableListDescriptor(
        'Configuration', StringWithComponentType, _collections_tags, _required,
        docstring='')  # type: List[StringWithComponentType]
    Accessories = StringDescriptor(
        'Accessories', _required, strict=DEFAULT_STRICT,
        docstring='Defines items that are out of the norm, or have been added or removed.')  # type: Optional[str]
    PaintScheme = StringDescriptor(
        'PaintScheme', _required, strict=DEFAULT_STRICT,
        docstring='Paint scheme of object (e.g. olive drab, compass ghost grey, etc.).')  # type: Optional[str]
    Camouflage = StringDescriptor(
        'Camouflage', _required, strict=DEFAULT_STRICT,
        docstring='Details the camouflage on the object.')  # type: Optional[str]
    Obscuration = StringDescriptor(
        'Obscuration', _required, strict=DEFAULT_STRICT,
        docstring='General description of the obscuration.')  # type: Optional[str]
    ObscurationPercent = FloatDescriptor(
        'ObscurationPercent', _required, strict=DEFAULT_STRICT,
        docstring='The percent obscuration.')  # type: Optional[float]
    ImageLevelObscuration = StringDescriptor(
        'ImageLevelObscuration', _required, strict=DEFAULT_STRICT,
        docstring='Specific description of the obscuration based on the sensor look angle.')  # type: Optional[str]
    # location of the labeled item
    ImageLocation = SerializableDescriptor(
        'ImageLocation', ImageLocationType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: ImageLocationType
    GeoLocation = SerializableDescriptor(
        'GeoLocation', GeoLocationType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: GeoLocationType
    # text quality descriptions
    TargetToClutterRatio = StringDescriptor(
        'TargetToClutterRatio', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    VisualQualityMetric = StringDescriptor(
        'VisualQualityMetric', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    UnderlyingTerrain = StringDescriptor(
        'UnderlyingTerrain', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    OverlyingTerrain = StringDescriptor(
        'OverlyingTerrain', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    TerrainTexture = StringDescriptor(
        'TerrainTexture', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    SeasonalCover = StringDescriptor(
        'SeasonalCover', _required, strict=DEFAULT_STRICT,
        docstring='')  # type: Optional[str]
    ProjectionPerturbation = SerializableDescriptor(
        'ProjectionPerturbation', ProjectionPerturbationType, _required,
        docstring='')  # type: Optional[ProjectionPerturbationType]

    def __init__(self, SystemName=None, SystemComponent=None, NATOName=None,
                 Function=None, Version=None, DecoyType=None, SerialNumber=None,
                 ObjectClass=None, ObjectSubClass=None, ObjectTypeClass=None,
                 ObjectType=None, ObjectLabel=None,
                 SlantPlane=None, GroundPlane=None,
                 Size=None, Orientation=None,
                 Articulation=None, Configuration=None,
                 Accessories=None, PaintScheme=None, Camouflage=None,
                 Obscuration=None, ObscurationPercent=None, ImageLevelObscuration=None,
                 ImageLocation=None, GeoLocation=None,
                 TargetToClutterRatio=None, VisualQualityMetric=None,
                 UnderlyingTerrain=None, OverlyingTerrain=None,
                 TerrainTexture=None, SeasonalCover=None,
                 ProjectionPerturbation=None,
                 **kwargs):
        """
        Parameters
        ----------
        SystemName : str
        SystemComponent : None|str
        NATOName : None|str
        Function : None|str
        Version : None|str
        DecoyType : None|str
        SerialNumber : None|str
        ObjectClass : None|str
        ObjectSubClass : None|str
        ObjectTypeClass : None|str
        ObjectType : None|str
        ObjectLabel : None|str
        SlantPlane : None|PlanePhysicalType
        GroundPlane : None|PlanePhysicalType
        Size : None|SizeType|numpy.ndarray|list|tuple
        Orientation : OrientationType
        Articulation : None|str|List[StringWithComponentType]
        Configuration : None|str|List[StringWithComponentType]
        Accessories : None|str
        PaintScheme : None|str
        Camouflage : None|str
        Obscuration : None|str
        ObscurationPercent : None|float
        ImageLevelObscuration : None|str
        ImageLocation : ImageLocationType
        GeoLocation : GeoLocationType
        TargetToClutterRatio : None|str
        VisualQualityMetric : None|str
        UnderlyingTerrain : None|str
        OverlyingTerrain : None|str
        TerrainTexture : None|str
        SeasonalCover : None|str
        ProjectionPerturbation : None|ProjectionPerturbationType
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.SystemName = SystemName
        self.SystemComponent = SystemComponent
        self.NATOName = NATOName
        self.Function = Function
        self.Version = Version
        self.DecoyType = DecoyType
        self.SerialNumber = SerialNumber
        self.ObjectClass = ObjectClass
        self.ObjectSubClass = ObjectSubClass
        self.ObjectTypeClass = ObjectTypeClass
        self.ObjectType = ObjectType
        self.ObjectLabel = ObjectLabel
        self.SlantPlane = SlantPlane
        self.GroundPlane = GroundPlane
        self.Size = Size
        self.Orientation = Orientation
        # promote bare str/dict entries to StringWithComponentType instances
        if isinstance(Articulation, (str, dict)):
            self.add_articulation(Articulation)
        elif isinstance(Articulation, list):
            for entry in Articulation:
                self.add_articulation(entry)
        else:
            self.Articulation = Articulation
        if isinstance(Configuration, (str, dict)):
            self.add_configuration(Configuration)
        elif isinstance(Configuration, list):
            for entry in Configuration:
                self.add_configuration(entry)
        else:
            self.Configuration = Configuration
        self.Accessories = Accessories
        self.PaintScheme = PaintScheme
        self.Camouflage = Camouflage
        self.Obscuration = Obscuration
        self.ObscurationPercent = ObscurationPercent
        self.ImageLevelObscuration = ImageLevelObscuration
        self.ImageLocation = ImageLocation
        self.GeoLocation = GeoLocation
        self.TargetToClutterRatio = TargetToClutterRatio
        self.VisualQualityMetric = VisualQualityMetric
        self.UnderlyingTerrain = UnderlyingTerrain
        self.OverlyingTerrain = OverlyingTerrain
        self.TerrainTexture = TerrainTexture
        self.SeasonalCover = SeasonalCover
        self.ProjectionPerturbation = ProjectionPerturbation
        super(TheObjectType, self).__init__(**kwargs)
def _check_placement(self, rows, cols, row_bounds, col_bounds, overlap_cutoff=0.5):
"""
Checks the bounds condition for the provided box.
Here inclusion is defined by what proportion of the area of the proposed
chip is actually contained inside the image bounds.
Parameters
----------
rows : int|float
The number of rows in the image.
cols : int|float
The number of columns in the image.
row_bounds : List
Of the form `[row min, row max]`
col_bounds : List
Of the form `[col min, col max]`
overlap_cutoff : float
Determines the transition from in the periphery to out of the image.
Returns
-------
int
1 - completely in the image
2 - the proposed chip has `overlap_cutoff <= fractional contained area < 1`
3 - the proposed chip has `fractional contained area < overlap_cutoff`
"""
if row_bounds[1] <= row_bounds[0] or col_bounds[1] <= col_bounds[0]:
raise ValueError('bounds out of order ({}, {})'.format(row_bounds, col_bounds))
if 0 <= row_bounds[0] and rows < row_bounds[1] and 0 <= col_bounds[0] and cols < col_bounds[1]:
return 1 # completely in bounds
row_size = row_bounds[1] - row_bounds[0]
col_size = col_bounds[1] - col_bounds[0]
first_row, last_row = max(0, row_bounds[0]), min(rows, row_bounds[1])
first_col, last_col = max(0, col_bounds[0]), min(cols, col_bounds[1])
area_overlap = (last_row - first_row)*(last_col - first_col)
if area_overlap >= overlap_cutoff*row_size*col_size:
return 2 # the item is at the periphery
else:
return 3 # it should be considered out of range
def set_image_location_from_sicd(self, sicd, populate_in_periphery=False):
"""
Set the image location information with respect to the given SICD,
assuming that the physical coordinates are populated.
Parameters
----------
sicd : SICDType
populate_in_periphery : bool
Returns
-------
int
-1 - insufficient metadata to proceed or other failure
0 - nothing to be done
1 - successful
2 - object in the image periphery, populating based on `populate_in_periphery`
3 - object not in the image field
"""
if self.ImageLocation is not None:
# no need to infer anything, it's already populated
return 0
if self.GeoLocation is None:
logger.warning(
'GeoLocation is not populated,\n\t'
'so the image location can not be inferred')
return -1
if not sicd.can_project_coordinates():
logger.warning(_no_projection_text)
return -1
# gets the prospective image location
image_location = ImageLocationType.from_geolocation(self.GeoLocation, sicd)
if image_location is None:
return -1
self.ImageLocation = image_location
# get nominal object size in meters and pixels
if self.Size is None:
row_size = 2.0
col_size = 2.0
else:
max_size = self.Size.get_max_diameter()
if max_size == 0:
max_size = 10.0
row_size = max_size/sicd.Grid.Row.SS
col_size = max_size/sicd.Grid.Col.SS
# check bounding information
rows = sicd.ImageData.NumRows
cols = sicd.ImageData.NumCols
center_pixel = image_location.CenterPixel.get_array(dtype='float64')
row_bounds = [center_pixel[0] - 0.5*row_size, center_pixel[0] + 0.5*row_size]
col_bounds = [center_pixel[1] - 0.5*col_size, center_pixel[1] + 0.5*col_size]
placement = self._check_placement(rows, cols, row_bounds, col_bounds)
if placement == 3:
return placement
if placement == 2 and not populate_in_periphery:
return placement
self.ImageLocation = image_location
return placement
def set_geo_location_from_sicd(self, sicd, projection_type='HAE', **proj_kwargs):
"""
Set the geographical location information with respect to the given SICD,
assuming that the image coordinates are populated.
.. Note::
This assumes that the image coordinates are with respect to the given
image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
which will be added here.
Parameters
----------
sicd : SICDType
projection_type : str
The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
requires configuration for the DEM pathway described in
:func:`sarpy.geometry.point_projection.image_to_ground_dem`.
proj_kwargs
The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.
"""
if self.GeoLocation is not None:
# no need to infer anything, it's already populated
return
if self.ImageLocation is None:
logger.warning(
'ImageLocation is not populated,\n\t'
'so the geographical location can not be inferred')
return
if not sicd.can_project_coordinates():
logger.warning(_no_projection_text)
return
self.GeoLocation = GeoLocationType.from_image_location(
self.ImageLocation, sicd, projection_type=projection_type, **proj_kwargs)
    def set_chip_details_from_sicd(self, sicd, layover_shift=False, populate_in_periphery=False, padding_fraction=0.05, minimum_pad=0):
        """
        Set the chip information with respect to the given SICD, assuming that the
        image location and size are defined.

        Parameters
        ----------
        sicd : SICDType
        layover_shift : bool
            Shift based on layover direction? This should be `True` if the identification of
            the bounds and/or center pixel do not include any layover, as in
            populating location from known ground truth. This should be `False` if
            the identification of bounds and/or center pixel do include layover,
            potentially as based on annotation of the imagery itself in pixel
            space.
        populate_in_periphery : bool
            Should we populate for peripheral?
        padding_fraction : None|float
            Default fraction of box dimension by which to pad.
        minimum_pad : int|float
            The minimum number of pixels by which to pad for the chip definition

        Returns
        -------
        int
            -1 - insufficient metadata to proceed
            0 - nothing to be done
            1 - successful
            2 - object in the image periphery, populating based on `populate_in_periphery`
            3 - object not in the image field
        """

        if self.SlantPlane is not None:
            # no need to infer anything, it's already populated
            return 0
        if self.Size is None:
            logger.warning(
                'Size is not populated,\n\t'
                'so the chip size can not be inferred')
            return -1
        if self.ImageLocation is None:
            # try to set from geolocation
            return_value = self.set_image_location_from_sicd(sicd, populate_in_periphery=populate_in_periphery)
            if return_value in [-1, 3] or (return_value == 2 and not populate_in_periphery):
                return return_value
        # get nominal object size, in meters
        max_size = self.Size.get_max_diameter()  # in meters
        row_size = max_size/sicd.Grid.Row.SS  # in pixels
        col_size = max_size/sicd.Grid.Col.SS  # in pixels
        # get nominal image box
        image_location = self.ImageLocation
        pixel_box = image_location.get_nominal_box(row_length=row_size, col_length=col_size)
        ground_unit_norm = wgs_84_norm(sicd.GeoData.SCP.ECF.get_array())
        slant_plane_unit_norm = numpy.cross(sicd.Grid.Row.UVectECF.get_array(), sicd.Grid.Col.UVectECF.get_array())
        magnitude_factor = ground_unit_norm.dot(slant_plane_unit_norm)
        # determines the relative size of things in slant plane versus ground plane
        # get nominal layover vector - should be pointed generally towards the top (negative rows value)
        layover_magnitude = sicd.SCPCOA.LayoverMagnitude
        if layover_magnitude is None:
            layover_magnitude = 0.25  # nominal fallback when the SICD does not define it
        layover_size = self.Size.Height*layover_magnitude*magnitude_factor
        if sicd.SCPCOA.LayoverAng is None:
            layover_angle = 0.0
        else:
            # layover direction relative to the image azimuth orientation, in radians
            layover_angle = numpy.deg2rad(sicd.SCPCOA.LayoverAng - sicd.SCPCOA.AzimAng)
        layover_vector = -layover_size*numpy.array(
            [numpy.cos(layover_angle)/sicd.Grid.Row.SS, -numpy.sin(layover_angle)/sicd.Grid.Col.SS])
        # craft the layover box
        if layover_shift:
            layover_box = pixel_box + layover_vector
        else:
            layover_box = pixel_box
        # determine the maximum and minimum pixel values here
        min_rows = min(numpy.min(pixel_box[:, 0]), numpy.min(layover_box[:, 0]))
        max_rows = max(numpy.max(pixel_box[:, 0]), numpy.max(layover_box[:, 0]))
        min_cols = min(numpy.min(pixel_box[:, 1]), numpy.min(layover_box[:, 1]))
        max_cols = max(numpy.max(pixel_box[:, 1]), numpy.max(layover_box[:, 1]))
        # determine the padding amount
        padding_fraction = 0.0 if padding_fraction is None else float(padding_fraction)
        if padding_fraction < 0.0:
            padding_fraction = 0.0
        row_pad = max(minimum_pad, padding_fraction*(max_rows-min_rows))
        col_pad = max(minimum_pad, padding_fraction*(max_cols-min_cols))
        # check our bounding information
        rows = sicd.ImageData.NumRows
        cols = sicd.ImageData.NumCols
        chip_rows = [min_rows - row_pad, max_rows + row_pad]
        chip_cols = [min_cols - col_pad, max_cols + col_pad]
        placement = self._check_placement(rows, cols, chip_rows, chip_cols)
        if placement == 3 or (placement == 2 and not populate_in_periphery):
            return placement
        # set the physical data ideal chip size
        physical = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
        # determine nominal shadow vector
        shadow_magnitude = sicd.SCPCOA.ShadowMagnitude
        if shadow_magnitude is None:
            shadow_magnitude = 1.0  # nominal fallback when the SICD does not define it
        shadow_size = self.Size.Height*shadow_magnitude*magnitude_factor
        shadow_angle = sicd.SCPCOA.Shadow
        shadow_angle = numpy.pi if shadow_angle is None else numpy.deg2rad(shadow_angle)
        shadow_vector = -shadow_size*numpy.array(
            [numpy.cos(shadow_angle)/sicd.Grid.Row.SS, -numpy.sin(shadow_angle)/sicd.Grid.Col.SS])
        # extend the chip bounds to also cover the shadowed region
        shadow_box = pixel_box + shadow_vector
        min_rows = min(min_rows, numpy.min(shadow_box[:, 0]))
        max_rows = max(max_rows, numpy.max(shadow_box[:, 0]))
        min_cols = min(min_cols, numpy.min(shadow_box[:, 1]))
        max_cols = max(max_cols, numpy.max(shadow_box[:, 1]))
        chip_rows = [min_rows - row_pad, max_rows + row_pad]
        chip_cols = [min_cols - col_pad, max_cols + col_pad]
        # set the physical with shadows data ideal chip size
        physical_with_shadows = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
        self.SlantPlane = PlanePhysicalType(
            Physical=physical,
            PhysicalWithShadows=physical_with_shadows)
        return placement
def get_image_geometry_object_for_sicd(self, include_chip=False):
"""
Gets the geometry element describing the image geometry for a sicd.
Returns
-------
geometry : Geometry
The geometry object
geometry_properties : List[GeometryProperties]
The associated geometry properties list
"""
if self.ImageLocation is None:
raise ValueError('No ImageLocation defined.')
image_geometry_object, geometry_properties = self.ImageLocation.get_geometry_object()
if image_geometry_object is None:
return None, None
if not include_chip or self.SlantPlane is None:
return image_geometry_object, geometry_properties
center_pixel = self.SlantPlane.Physical.CenterPixel.get_array()
chip_size = self.SlantPlane.Physical.ChipSize.get_array()
shift = numpy.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype='float64')
shift[:, 0] *= chip_size[0]
shift[:, 1] *= chip_size[1]
chip_rect = center_pixel + shift
chip_area = Polygon(coordinates=[chip_rect, ])
geometry_properties.append(GeometryProperties(name='Physical', color='red'))
if isinstance(image_geometry_object, GeometryCollection):
image_geometry_object.geometries.append(chip_area)
else:
image_geometry_object = GeometryCollection(geometries=[image_geometry_object, chip_area])
center_pixel = self.SlantPlane.PhysicalWithShadows.CenterPixel.get_array()
chip_size = self.SlantPlane.PhysicalWithShadows.ChipSize.get_array()
shift = numpy.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype='float64')
shift[:, 0] *= chip_size[0]
shift[:, 1] *= chip_size[1]
chip_rect = center_pixel + shift
chip_area = Polygon(coordinates=[chip_rect, ])
geometry_properties.append(GeometryProperties(name='PhysicalWithShadows', color='magenta'))
image_geometry_object.geometries.append(chip_area)
return image_geometry_object, geometry_properties
def add_articulation(self, value):
if value is None:
return
if isinstance(value, str):
value = StringWithComponentType(Value=value)
elif isinstance(value, dict):
value = StringWithComponentType(**value)
if not isinstance(value, StringWithComponentType):
raise TypeError('values for Articulation must be of type str or StringWithComponentType')
if self.Articulation is None:
self.Articulation = [value, ]
else:
self.Articulation.append(value)
def add_configuration(self, value):
if value is None:
return
if isinstance(value, str):
value = StringWithComponentType(Value=value)
elif isinstance(value, dict):
value = StringWithComponentType(**value)
if not isinstance(value, StringWithComponentType):
raise TypeError('values for Configuration must be of type str or StringWithComponentType')
if self.Configuration is None:
self.Configuration = [value, ]
else:
self.Configuration.append(value)
# the main type
class ObjectInfoType(Serializable):
    """
    The collection of labeled objects, with label provenance and object counts.
    """

    _fields = (
        'NumberOfObjectsInImage', 'NumberOfObjectsInScene', 'LabelSource', 'Objects')
    _required = ('NumberOfObjectsInImage', 'NumberOfObjectsInScene', 'LabelSource', 'Objects')
    _collections_tags = {'Objects': {'array': False, 'child_tag': 'Object'}}
    # descriptors
    NumberOfObjectsInImage = IntegerDescriptor(
        'NumberOfObjectsInImage', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the image.')  # type: int
    NumberOfObjectsInScene = IntegerDescriptor(
        'NumberOfObjectsInScene', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the scene.')  # type: int
    LabelSource = SerializableDescriptor(
        'LabelSource', LabelSourceType, _required, strict=DEFAULT_STRICT,
        docstring='The source of the labels')  # type: LabelSourceType
    Objects = SerializableListDescriptor(
        'Objects', TheObjectType, _collections_tags, _required, strict=DEFAULT_STRICT,
        docstring='The object collection')  # type: List[TheObjectType]

    def __init__(self, NumberOfObjectsInImage=None, NumberOfObjectsInScene=None,
                 LabelSource=None, Objects=None, **kwargs):
        """
        Parameters
        ----------
        NumberOfObjectsInImage : int
        NumberOfObjectsInScene : int
        LabelSource : LabelSourceType
        Objects : List[TheObjectType]
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.NumberOfObjectsInImage = NumberOfObjectsInImage
        self.NumberOfObjectsInScene = NumberOfObjectsInScene
        self.LabelSource = LabelSource
        self.Objects = Objects
        super(ObjectInfoType, self).__init__(**kwargs)

    def set_image_location_from_sicd(
            self, sicd, layover_shift=False, populate_in_periphery=False,
            include_out_of_range=False, padding_fraction=None, minimum_pad=None):
        """
        Set the image location information with respect to the given SICD,
        assuming that the physical coordinates are populated. The `NumberOfObjectsInImage`
        will be set, and `NumberOfObjectsInScene` will be left unchanged.

        Parameters
        ----------
        sicd : SICDType
        layover_shift : bool
            Account for possible layover shift in calculated chip sizes?
        populate_in_periphery : bool
            Populate image information for objects on the periphery?
        include_out_of_range : bool
            Include the objects which are out of range (with no image location information)?
        padding_fraction : None|float
        minimum_pad : None|int|float
        """

        def update_object(temp_object, in_image_count):
            # set location/chip details for one object; returns whether the
            # object landed in the image, and the updated in-image count
            status = temp_object.set_image_location_from_sicd(
                sicd, populate_in_periphery=populate_in_periphery)
            use_object = False
            if status == 0:
                # status 0 means the location was already populated
                raise ValueError('Object already has image details set')
            if status == 1 or (status == 2 and populate_in_periphery):
                use_object = True
                temp_object.set_chip_details_from_sicd(
                    sicd, layover_shift=layover_shift, populate_in_periphery=True,
                    padding_fraction=padding_fraction, minimum_pad=minimum_pad)
                in_image_count += 1
            return use_object, in_image_count

        objects_in_image = 0
        if include_out_of_range:
            # the objects list is just modified in place
            for the_object in self.Objects:
                _, objects_in_image = update_object(the_object, objects_in_image)
        else:
            # we make a new objects list
            objects = []
            for the_object in self.Objects:
                use_this_object, objects_in_image = update_object(the_object, objects_in_image)
                if use_this_object:
                    objects.append(the_object)
            self.Objects = objects
        self.NumberOfObjectsInImage = objects_in_image
| 52,249 | 39.441176 | 135 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/ImageInfo.py | """
Definition for the ImageInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional
import numpy
from sarpy.io.xml.base import Serializable, Arrayable
from sarpy.io.xml.descriptors import StringDescriptor, SerializableDescriptor, \
IntegerDescriptor, StringEnumDescriptor, DateTimeDescriptor, FloatDescriptor, \
BooleanDescriptor
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.blocks import LatLonType
from .base import DEFAULT_STRICT
from .blocks import RangeCrossRangeType, ProjectionPerturbationType
class NumPixelsType(Serializable, Arrayable):
    """A row and column attribute container - used as indices into array(s)."""

    _fields = ('NumRows', 'NumCols')
    _required = _fields
    NumRows = IntegerDescriptor(
        'NumRows', _required, strict=True, docstring='The number of rows.')  # type: int
    NumCols = IntegerDescriptor(
        'NumCols', _required, strict=True, docstring='The number of columns.')  # type: int

    def __init__(self, NumRows=None, NumCols=None, **kwargs):
        """
        Parameters
        ----------
        NumRows : int
        NumCols : int
        kwargs
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.NumRows = NumRows
        self.NumCols = NumCols
        super(NumPixelsType, self).__init__(**kwargs)

    def get_array(self, dtype=numpy.int64):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
            array of the form [NumRows, NumCols]
        """

        return numpy.array([self.NumRows, self.NumCols], dtype=dtype)

    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [NumRows, NumCols]

        Returns
        -------
        NumPixelsType
        """

        if array is None:
            return None
        if not isinstance(array, (numpy.ndarray, list, tuple)):
            raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
        if len(array) < 2:
            raise ValueError('Expected array to be of length 2, and received {}'.format(array))
        return cls(NumRows=array[0], NumCols=array[1])
class ClassificationMarkingsType(Serializable):
    """
    The classification markings for the image and the label data.
    """

    _fields = (
        'Classification', 'Restrictions', 'ClassifiedBy', 'DeclassifyOn', 'DerivedFrom')
    _required = ('Classification', 'Restrictions')
    # descriptors
    Classification = StringDescriptor(
        'Classification', _required, default_value='',
        docstring='The image classification')  # type: str
    Restrictions = StringDescriptor(
        'Restrictions', _required, default_value='',
        docstring='Additional caveats to the classification')  # type: str
    ClassifiedBy = StringDescriptor(
        'ClassifiedBy', _required)  # type: Optional[str]
    DeclassifyOn = StringDescriptor(
        'DeclassifyOn', _required)  # type: Optional[str]
    DerivedFrom = StringDescriptor(
        'DerivedFrom', _required)  # type: Optional[str]

    def __init__(self, Classification='', Restrictions='', ClassifiedBy=None,
                 DeclassifyOn=None, DerivedFrom=None, **kwargs):
        """
        Parameters
        ----------
        Classification : str
        Restrictions : str
        ClassifiedBy : None|str
        DeclassifyOn : None|str
        DerivedFrom : None|str
        kwargs
            Other keyword arguments
        """

        for ns_field in ('_xml_ns', '_xml_ns_key'):
            if ns_field in kwargs:
                setattr(self, ns_field, kwargs[ns_field])
        self.Classification = Classification
        self.Restrictions = Restrictions
        self.ClassifiedBy = ClassifiedBy
        self.DeclassifyOn = DeclassifyOn
        self.DerivedFrom = DerivedFrom
        super(ClassificationMarkingsType, self).__init__(**kwargs)
class StringRangeCrossRangeType(Serializable):
    """
    A range and cross range attribute container
    """

    _fields = ('Range', 'CrossRange')
    _required = _fields
    # descriptors
    Range = StringDescriptor(
        'Range', _required, strict=True, docstring='The Range attribute.')  # type: str
    CrossRange = StringDescriptor(
        'CrossRange', _required, strict=True, docstring='The Cross Range attribute.')  # type: str

    def __init__(self, Range=None, CrossRange=None, **kwargs):
        """
        Parameters
        ----------
        Range : str
        CrossRange : str
        kwargs
        """

        for ns_field in ('_xml_ns', '_xml_ns_key'):
            if ns_field in kwargs:
                setattr(self, ns_field, kwargs[ns_field])
        self.Range = Range
        self.CrossRange = CrossRange
        super(StringRangeCrossRangeType, self).__init__(**kwargs)
class ImageCornerType(Serializable):
    """
    The four image corner locations, in latitude/longitude.
    """

    _fields = (
        'UpperLeft', 'UpperRight', 'LowerRight', 'LowerLeft')
    _required = _fields
    # descriptors
    UpperLeft = SerializableDescriptor(
        'UpperLeft', LatLonType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonType
    UpperRight = SerializableDescriptor(
        'UpperRight', LatLonType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonType
    LowerRight = SerializableDescriptor(
        'LowerRight', LatLonType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonType
    LowerLeft = SerializableDescriptor(
        'LowerLeft', LatLonType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonType

    def __init__(self, UpperLeft=None, UpperRight=None,
                 LowerRight=None, LowerLeft=None, **kwargs):
        """
        Parameters
        ----------
        UpperLeft : LatLonType|numpy.ndarray|list|tuple
        UpperRight : LatLonType|numpy.ndarray|list|tuple
        LowerRight : LatLonType|numpy.ndarray|list|tuple
        LowerLeft : LatLonType|numpy.ndarray|list|tuple
        kwargs
        """

        for ns_field in ('_xml_ns', '_xml_ns_key'):
            if ns_field in kwargs:
                setattr(self, ns_field, kwargs[ns_field])
        # assign the corners in the order declared in _fields
        corner_values = (
            ('UpperLeft', UpperLeft), ('UpperRight', UpperRight),
            ('LowerRight', LowerRight), ('LowerLeft', LowerLeft))
        for corner_name, corner_value in corner_values:
            setattr(self, corner_name, corner_value)
        super(ImageCornerType, self).__init__(**kwargs)
class PixelSpacingType(Serializable):
    """
    Container for the center-to-center pixel spacing details.
    """

    _fields = ('PixelSpacing', )
    _required = _fields
    # descriptors
    PixelSpacing = SerializableDescriptor(
        'PixelSpacing', RangeCrossRangeType, _required, strict=DEFAULT_STRICT,
        docstring='The center-to-center pixel spacing in meters.')  # type: RangeCrossRangeType

    def __init__(self, PixelSpacing=None, **kwargs):
        """
        Parameters
        ----------
        PixelSpacing : RangeCrossRangeType
        kwargs
        """

        for ns_field in ('_xml_ns', '_xml_ns_key'):
            if ns_field in kwargs:
                setattr(self, ns_field, kwargs[ns_field])
        self.PixelSpacing = PixelSpacing
        super(PixelSpacingType, self).__init__(**kwargs)
class ImageInfoType(Serializable):
    """
    Information about the referenced image - file format details, collection
    particulars, and image formation parameters.
    """

    _fields = (
        'DataFilename', 'ClassificationMarkings', 'Filetype', 'DataCheckSum',
        'DataSize', 'DataPlane', 'DataDomain', 'DataType', 'BitsPerSample',
        'DataFormat', 'DataByteOrder', 'NumPixels', 'ImageCollectionDate', 'ZuluOffset',
        'SensorReferencePoint', 'SensorCalibrationFactor', 'DataCalibrated',
        'Resolution', 'PixelSpacing', 'WeightingType', 'OverSamplingFactor',
        'IPRWidth3dB', 'ImageQualityDescription', 'ImageHeading',
        'ImageCorners', 'SlantPlane', 'GroundPlane', 'SceneCenterReferenceLine',
        'ProjectionPerturbation')
    _required = (
        'DataFilename', 'ClassificationMarkings', 'DataCheckSum', 'DataPlane',
        'DataDomain', 'DataType', 'DataFormat', 'NumPixels', 'ImageCollectionDate',
        'SensorReferencePoint', 'DataCalibrated', 'Resolution', 'PixelSpacing',
        'WeightingType', 'ImageCorners')
    _numeric_format = {
        'ImageHeading': '0.17G', 'SensorCalibrationFactor': '0.17G',
        'SceneCenterReferenceLine': '0.17G', }
    # descriptors
    DataFilename = StringDescriptor(
        'DataFilename', _required,
        docstring='The base file name to which this information pertains')  # type: str
    ClassificationMarkings = SerializableDescriptor(
        'ClassificationMarkings', ClassificationMarkingsType, _required,
        docstring='The classification information')  # type: ClassificationMarkingsType
    Filetype = StringDescriptor(
        'Filetype', _required,
        docstring='The image file type')  # type: Optional[str]
    DataCheckSum = StringDescriptor(
        'DataCheckSum', _required,
        docstring='The 32 character (hexidecimal digest) MD5 checksum of the '
                  'full image file')  # type: str
    DataSize = IntegerDescriptor(
        'DataSize', _required,
        docstring='The image size in bytes')  # type: Optional[int]
    DataPlane = StringEnumDescriptor(
        'DataPlane', {'Slant', 'Ground'}, _required, default_value='Slant',
        docstring='The image plane.')  # type: str
    DataDomain = StringEnumDescriptor(
        'DataDomain', {'Image', }, _required,  # todo: values
        docstring='The image data domain')  # type: str
    DataType = StringEnumDescriptor(
        'DataType', {'Magnitude/Phase', 'In-phase/Quadrature'}, _required,
        docstring='The image data type')  # type: str
    BitsPerSample = IntegerDescriptor(
        'BitsPerSample', _required,
        docstring='The number of bits per sample')  # type: Optional[int]
    DataFormat = StringDescriptor(
        'DataFormat', _required,
        docstring='The image data format')  # type: str
    DataByteOrder = StringEnumDescriptor(
        'DataByteOrder', {'Big-Endian', 'Little-Endian'}, _required,
        docstring='The image data byte order.')  # type: Optional[str]
    NumPixels = SerializableDescriptor(
        'NumPixels', NumPixelsType, _required,
        docstring='The number of image pixels')  # type: NumPixelsType
    ImageCollectionDate = DateTimeDescriptor(
        'ImageCollectionDate', _required,
        docstring='The date/time of the image collection in UTC')  # type: Optional[numpy.datetime64]
    ZuluOffset = StringDescriptor(
        'ZuluOffset', _required,
        docstring='The local time offset from UTC')  # type: Optional[str]
    # NOTE: the descriptor name below was previously 'DataPlane' (a copy/paste
    # error), which made this field share backing storage with DataPlane -
    # setting one silently clobbered the other. Corrected to its own name.
    SensorReferencePoint = StringEnumDescriptor(
        'SensorReferencePoint', {'Left', 'Right', 'Top', 'Bottom'}, _required,
        docstring='Description of the sensor location relative to the scene.')  # type: Optional[str]
    SensorCalibrationFactor = FloatDescriptor(
        'SensorCalibrationFactor', _required,
        docstring='Multiplicative factor used to scale raw image data to the return '
                  'of a calibrated reference reflector or active source')  # type: Optional[float]
    DataCalibrated = BooleanDescriptor(
        'DataCalibrated', _required,
        docstring='Has the data been calibrated?')  # type: bool
    Resolution = SerializableDescriptor(
        'Resolution', RangeCrossRangeType, _required,
        docstring='Resolution (intrinsic) of the sensor system/mode in meters.')  # type: RangeCrossRangeType
    PixelSpacing = SerializableDescriptor(
        'PixelSpacing', RangeCrossRangeType, _required,
        docstring='Pixel spacing of the image in meters.')  # type: RangeCrossRangeType
    WeightingType = SerializableDescriptor(
        'WeightingType', StringRangeCrossRangeType, _required,
        docstring='Weighting function applied to the image during formation.')  # type: StringRangeCrossRangeType
    OverSamplingFactor = SerializableDescriptor(
        'OverSamplingFactor', RangeCrossRangeType, _required,
        docstring='The factor by which the pixel space is oversampled.')  # type: Optional[RangeCrossRangeType]
    IPRWidth3dB = SerializableDescriptor(
        'IPRWidth3dB', RangeCrossRangeType, _required,
        docstring='The 3 dB system impulse response with, in meters')  # type: Optional[RangeCrossRangeType]
    ImageQualityDescription = StringDescriptor(
        'ImageQualityDescription', _required,
        docstring='General description of image quality')  # type: Optional[str]
    ImageHeading = FloatDescriptor(
        'ImageHeading', _required,
        docstring='Image heading relative to True North, in degrees')  # type: Optional[float]
    ImageCorners = SerializableDescriptor(
        'ImageCorners', ImageCornerType, _required,
        docstring='The image corners')  # type: ImageCornerType
    SlantPlane = SerializableDescriptor(
        'SlantPlane', PixelSpacingType, _required,
        docstring='The slant plane pixel spacing')  # type: Optional[PixelSpacingType]
    GroundPlane = SerializableDescriptor(
        'GroundPlane', PixelSpacingType, _required,
        docstring='The ground plane pixel spacing')  # type: Optional[PixelSpacingType]
    SceneCenterReferenceLine = FloatDescriptor(
        'SceneCenterReferenceLine', _required,
        docstring='The ideal line (heading) at the intersection of the radar '
                  'line-of-sight with the horizontal reference plane '
                  'created by the forward motion of the aircraft, '
                  'in degrees')  # type: Optional[float]
    ProjectionPerturbation = SerializableDescriptor(
        'ProjectionPerturbation', ProjectionPerturbationType, _required,
        docstring='')  # type: Optional[ProjectionPerturbationType]

    def __init__(self, DataFilename=None, ClassificationMarkings=None,
                 FileType=None, DataCheckSum=None, DataSize=None,
                 DataPlane='Slant', DataDomain=None, DataType=None,
                 BitsPerSample=None, DataFormat=None, DataByteOrder=None, NumPixels=None,
                 ImageCollectionDate=None, ZuluOffset=None,
                 SensorReferencePoint=None, SensorCalibrationFactor=None,
                 DataCalibrated=None, Resolution=None, PixelSpacing=None,
                 WeightingType=None, OverSamplingFactor=None, IPRWidth3dB=None,
                 ImageQualityDescription=None, ImageHeading=None, ImageCorners=None,
                 SlantPlane=None, GroundPlane=None, SceneCenterReferenceLine=None,
                 ProjectionPerturbation=None,
                 **kwargs):
        """
        Parameters
        ----------
        DataFilename : str
        ClassificationMarkings : ClassificationMarkingsType
        FileType : str
        DataCheckSum : str
        DataSize : int
        DataPlane : str
        DataDomain : None|str
        DataType : None|str
        BitsPerSample : None|int
        DataFormat : None|str
        DataByteOrder : None|str
        NumPixels : NumPixelsType|numpy.ndarray|list|tuple
        ImageCollectionDate : numpy.datetime64|datetime|date|str
        ZuluOffset : None|str
        SensorReferencePoint : None|str
        SensorCalibrationFactor : None|float
        DataCalibrated : bool
        Resolution : RangeCrossRangeType|numpy.ndarray|list|tuple
        PixelSpacing : RangeCrossRangeType|numpy.ndarray|list|tuple
        WeightingType : StringRangeCrossRangeType
        OverSamplingFactor : None|RangeCrossRangeType
        IPRWidth3dB : None|RangeCrossRangeType|numpy.ndarray|list|tuple
        ImageQualityDescription : None|str
        ImageHeading : None|float
        ImageCorners : ImageCornerType
        SlantPlane : None|PixelSpacingType
        GroundPlane : None|PixelSpacingType
        SceneCenterReferenceLine : None|float
        ProjectionPerturbation : None|ProjectionPerturbationType
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.DataFilename = DataFilename
        if ClassificationMarkings is None:
            self.ClassificationMarkings = ClassificationMarkingsType()
        else:
            self.ClassificationMarkings = ClassificationMarkings
        # NB: the `FileType` argument populates the `Filetype` field - the
        # casing mismatch is part of the established interface
        self.Filetype = FileType
        self.DataCheckSum = DataCheckSum
        self.DataSize = DataSize
        self.DataPlane = DataPlane
        self.DataDomain = DataDomain
        self.DataType = DataType
        self.BitsPerSample = BitsPerSample
        self.DataFormat = DataFormat
        self.DataByteOrder = DataByteOrder
        self.NumPixels = NumPixels
        self.ImageCollectionDate = ImageCollectionDate
        self.ZuluOffset = ZuluOffset
        self.SensorReferencePoint = SensorReferencePoint
        self.SensorCalibrationFactor = SensorCalibrationFactor
        self.DataCalibrated = DataCalibrated
        self.Resolution = Resolution
        self.PixelSpacing = PixelSpacing
        self.WeightingType = WeightingType
        self.OverSamplingFactor = OverSamplingFactor
        self.IPRWidth3dB = IPRWidth3dB
        self.ImageQualityDescription = ImageQualityDescription
        self.ImageHeading = ImageHeading
        self.ImageCorners = ImageCorners
        self.SlantPlane = SlantPlane
        self.GroundPlane = GroundPlane
        self.SceneCenterReferenceLine = SceneCenterReferenceLine
        self.ProjectionPerturbation = ProjectionPerturbation
        super(ImageInfoType, self).__init__(**kwargs)

    @classmethod
    def from_sicd(cls, sicd, base_file_name, file_type='NITF02.10', md5_checksum=None):
        """
        Construct the ImageInfo from the sicd object and given image file name.

        Parameters
        ----------
        sicd : SICDType
        base_file_name : str
        file_type : str
            The file type. This should probably always be NITF02.10 for now.
        md5_checksum : None|str
            The md5 checksum of the full image file.

        Returns
        -------
        ImageInfoType

        Raises
        ------
        ValueError
            For an unhandled sicd.ImageData.PixelType value.
        """
        # translate the SICD pixel type into the data type/format description
        pixel_type = sicd.ImageData.PixelType
        if pixel_type == 'RE32F_IM32F':
            data_type = 'In-phase/Quadrature'
            bits_per_sample = 32
            data_format = 'float'
        elif pixel_type == 'RE16I_IM16I':
            data_type = 'In-phase/Quadrature'
            bits_per_sample = 16
            data_format = 'integer'
        elif pixel_type == 'AMP8I_PHS8I':
            data_type = 'Magnitude/Phase'
            bits_per_sample = 8
            data_format = 'unsigned integer'
        else:
            raise ValueError('Unhandled sicd.ImageData.PixelType `{}`'.format(pixel_type))
        # the data is considered calibrated when radiometric information is present
        data_cal = sicd.Radiometric is not None
        icps = ImageCornerType(
            UpperLeft=sicd.GeoData.ImageCorners.FRFC,
            UpperRight=sicd.GeoData.ImageCorners.FRLC,
            LowerRight=sicd.GeoData.ImageCorners.LRLC,
            LowerLeft=sicd.GeoData.ImageCorners.LRFC)
        # NB: the SICD Grid.ImagePlane enumeration values are upper case
        # ('SLANT', 'GROUND', 'OTHER') - previously the ground branch compared
        # against 'Ground' and was unreachable
        if sicd.Grid.ImagePlane == 'SLANT':
            data_plane = 'Slant'
        elif sicd.Grid.ImagePlane == 'GROUND':
            data_plane = 'Ground'
        else:
            data_plane = None
        # populate the projection perturbation details, when the coa projection
        # carries any non-trivial offsets
        has_perturb = False
        proj_perturb = None
        coa = sicd.coa_projection
        if coa is not None:
            delta_arp = coa.delta_arp
            if numpy.any(delta_arp != 0):
                has_perturb = True
            else:
                delta_arp = None
            delta_varp = coa.delta_varp
            if numpy.any(delta_varp != 0):
                has_perturb = True
            else:
                delta_varp = None
            delta_range = coa.delta_range
            if delta_range != 0:
                has_perturb = True
            else:
                delta_range = None
            if has_perturb:
                proj_perturb = ProjectionPerturbationType(
                    CoordinateFrame='ECF',
                    DeltaArp=delta_arp,
                    DeltaVarp=delta_varp,
                    DeltaRange=delta_range)
        # NOTE(review): sicd.Grid.Row.WgtType may be None for some products, in
        # which case the WindowName access below would raise - confirm upstream
        return ImageInfoType(
            DataFilename=base_file_name,
            ClassificationMarkings=ClassificationMarkingsType(
                Classification=sicd.CollectionInfo.Classification),
            FileType=file_type,
            DataCheckSum=md5_checksum,
            DataPlane=data_plane,
            DataType=data_type,
            DataCalibrated=data_cal,
            BitsPerSample=bits_per_sample,
            DataDomain='Image',
            DataFormat=data_format,
            DataByteOrder='Big-Endian',
            NumPixels=(sicd.ImageData.NumRows, sicd.ImageData.NumCols),
            ImageCollectionDate=sicd.Timeline.CollectStart,
            SensorReferencePoint='Top',
            Resolution=(sicd.Grid.Row.ImpRespWid, sicd.Grid.Col.ImpRespWid),
            PixelSpacing=(sicd.Grid.Row.SS, sicd.Grid.Col.SS),
            WeightingType=StringRangeCrossRangeType(
                Range=sicd.Grid.Row.WgtType.WindowName,
                CrossRange=sicd.Grid.Col.WgtType.WindowName),
            ImageHeading=sicd.SCPCOA.AzimAng,
            ImageCorners=icps,
            ProjectionPerturbation=proj_perturb)
| 21,522 | 39.456767 | 113 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/Research.py | """
Definition for the main NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional
import os
from sarpy.io.xml.base import Serializable, parse_xml_from_string, parse_xml_from_file
from sarpy.io.xml.descriptors import SerializableDescriptor, StringDescriptor
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.general.utils import calculate_md5
from .base import DEFAULT_STRICT
from .CollectionInfo import CollectionInfoType
from .SubCollectionInfo import SubCollectionInfoType
from .ImageInfo import ImageInfoType
from .SensorInfo import SensorInfoType
from .FiducialInfo import FiducialInfoType
from .ObjectInfo import ObjectInfoType
_AFRL_SPECIFICATION_NAMESPACE = 'urn:AFRL_RDE:1.0.0'
class ResearchType(Serializable):
    """
    The root NGA modified RDE/AFRL labeling object, aggregating the "Detail"
    information blocks (collection, image, sensor, fiducial, and object
    ground truth) for a single labeled dataset.
    """

    _fields = (
        'MetadataVersion', 'DetailCollectionInfo', 'DetailSubCollectionInfo',
        'DetailImageInfo', 'DetailSensorInfo', 'DetailFiducialInfo', 'DetailObjectInfo')
    _required = (
        'MetadataVersion', 'DetailCollectionInfo', 'DetailSubCollectionInfo',
        'DetailImageInfo', 'DetailSensorInfo', 'DetailFiducialInfo', 'DetailObjectInfo')
    # descriptors
    MetadataVersion = StringDescriptor(
        'MetadataVersion', _required,
        docstring='The version number')  # type: str
    DetailCollectionInfo = SerializableDescriptor(
        'DetailCollectionInfo', CollectionInfoType, _required,
        docstring='High level information about the data collection'
    )  # type: Optional[CollectionInfoType]
    DetailSubCollectionInfo = SerializableDescriptor(
        'DetailSubCollectionInfo', SubCollectionInfoType, _required,
        docstring='Information about sub-division of the overall data collection'
    )  # type: Optional[SubCollectionInfoType]
    DetailImageInfo = SerializableDescriptor(
        'DetailImageInfo', ImageInfoType, _required,
        docstring='Information about the referenced image'
    )  # type: Optional[ImageInfoType]
    DetailSensorInfo = SerializableDescriptor(
        'DetailSensorInfo', SensorInfoType, _required,
        docstring='Information about the sensor'
    )  # type: Optional[SensorInfoType]
    DetailFiducialInfo = SerializableDescriptor(
        'DetailFiducialInfo', FiducialInfoType, _required,
        docstring='Information about the ground-truthed fiducials'
    )  # type: Optional[FiducialInfoType]
    DetailObjectInfo = SerializableDescriptor(
        'DetailObjectInfo', ObjectInfoType, _required,
        docstring='Information about the ground-truthed objects'
    )  # type: Optional[ObjectInfoType]

    def __init__(self, MetadataVersion='Unknown', DetailCollectionInfo=None,
                 DetailSubCollectionInfo=None, DetailImageInfo=None,
                 DetailSensorInfo=None, DetailFiducialInfo=None,
                 DetailObjectInfo=None, **kwargs):
        """
        Parameters
        ----------
        MetadataVersion : str
        DetailCollectionInfo : CollectionInfoType
        DetailSubCollectionInfo : SubCollectionInfoType
        DetailImageInfo : ImageInfoType
        DetailSensorInfo : SensorInfo
        DetailFiducialInfo : FiducialInfoType
        DetailObjectInfo : ObjectInfoType
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.MetadataVersion = MetadataVersion
        self.DetailCollectionInfo = DetailCollectionInfo
        self.DetailSubCollectionInfo = DetailSubCollectionInfo
        self.DetailImageInfo = DetailImageInfo
        self.DetailSensorInfo = DetailSensorInfo
        self.DetailFiducialInfo = DetailFiducialInfo
        self.DetailObjectInfo = DetailObjectInfo
        super(ResearchType, self).__init__(**kwargs)

    def to_xml_bytes(self, urn=None, tag='RESEARCH', check_validity=False, strict=DEFAULT_STRICT):
        # default to the AFRL RDE specification namespace when none is supplied
        if urn is None:
            urn = _AFRL_SPECIFICATION_NAMESPACE
        return super(ResearchType, self).to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict)

    def to_xml_string(self, urn=None, tag='RESEARCH', check_validity=False, strict=DEFAULT_STRICT):
        # utf-8 decode of the bytes serialization above
        return self.to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict).decode('utf-8')

    def apply_sicd(self, sicd, base_file_name, layover_shift=False, populate_in_periphery=False, include_out_of_range=False,
                   padding_fraction=0.05, minimum_pad=0, md5_checksum=None):
        """
        Apply the given sicd to define all the relevant derived data, assuming
        that the starting point is physical ground truth populated, and image
        details and locations will be inferred. This modifies the structure in
        place.

        Parameters
        ----------
        sicd : SICDType
        base_file_name : str
        layover_shift : bool
        populate_in_periphery : bool
        include_out_of_range : bool
        padding_fraction : None|float
        minimum_pad : int|float
        md5_checksum : None|str

        Raises
        ------
        ValueError
            If DetailImageInfo or DetailSensorInfo is already populated.
        """
        # assume that collection info and subcollection info are previously defined

        # define the image info
        if self.DetailImageInfo is not None:
            raise ValueError('Image Info is already defined')
        self.DetailImageInfo = ImageInfoType.from_sicd(
            sicd, base_file_name, md5_checksum=md5_checksum)

        # define sensor info
        if self.DetailSensorInfo is not None:
            raise ValueError('Sensor Info is already defined')
        self.DetailSensorInfo = SensorInfoType.from_sicd(sicd)

        # populate (or default) the fiducial image locations
        if self.DetailFiducialInfo is None:
            self.DetailFiducialInfo = FiducialInfoType(
                NumberOfFiducialsInImage=0, NumberOfFiducialsInScene=0)
        else:
            self.DetailFiducialInfo.set_image_location_from_sicd(
                sicd,
                populate_in_periphery=populate_in_periphery,
                include_out_of_range=include_out_of_range)

        # populate (or default) the object image locations
        if self.DetailObjectInfo is None:
            self.DetailObjectInfo = ObjectInfoType(
                NumberOfObjectsInImage=0, NumberOfObjectsInScene=0)
        else:
            self.DetailObjectInfo.set_image_location_from_sicd(
                sicd,
                layover_shift=layover_shift,
                populate_in_periphery=populate_in_periphery,
                include_out_of_range=include_out_of_range,
                padding_fraction=padding_fraction,
                minimum_pad=minimum_pad)

    def apply_sicd_reader(
            self, sicd_reader, layover_shift=False, populate_in_periphery=False, include_out_of_range=False,
            padding_fraction=0.05, minimum_pad=0, populate_md5=True):
        """
        Apply the given sicd to define all the relevant derived data, assuming
        that the starting point is physical ground truth populated, and image
        details and locations will be inferred. This modifies the structure in
        place.

        Parameters
        ----------
        sicd_reader : SICDReader
        layover_shift : bool
        populate_in_periphery : bool
        include_out_of_range : bool
        padding_fraction : None|float
        minimum_pad : int|float
        populate_md5 : bool
        """
        md5_checksum = None if (sicd_reader.file_name is None or not populate_md5) \
            else calculate_md5(sicd_reader.file_name)
        # NOTE(review): if sicd_reader.file_name is None the os.path.split call
        # below will raise a TypeError - confirm whether in-memory readers can
        # reach this path
        base_file = os.path.split(sicd_reader.file_name)[1]
        self.apply_sicd(
            sicd_reader.sicd_meta,
            base_file,
            layover_shift=layover_shift,
            populate_in_periphery=populate_in_periphery,
            include_out_of_range=include_out_of_range,
            padding_fraction=padding_fraction,
            minimum_pad=minimum_pad,
            md5_checksum=md5_checksum)

    @classmethod
    def from_xml_file(cls, file_path):
        """
        Construct the research object from an xml file path.

        Parameters
        ----------
        file_path : str

        Returns
        -------
        ResearchType
        """
        root_node, xml_ns = parse_xml_from_file(file_path)
        # namespace-qualified documents map the default namespace to 'default'
        ns_key = 'default' if 'default' in xml_ns else None
        return cls.from_node(root_node, xml_ns=xml_ns, ns_key=ns_key)

    @classmethod
    def from_xml_string(cls, xml_string):
        """
        Construct the research object from an xml string.

        Parameters
        ----------
        xml_string : str|bytes

        Returns
        -------
        ResearchType
        """
        root_node, xml_ns = parse_xml_from_string(xml_string)
        # namespace-qualified documents map the default namespace to 'default'
        ns_key = 'default' if 'default' in xml_ns else None
        return cls.from_node(root_node, xml_ns=xml_ns, ns_key=ns_key)
| 8,911 | 38.087719 | 124 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/SubCollectionInfo.py | """
Definition for the SubCollectionInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import SerializableDescriptor, StringDescriptor
from .base import DEFAULT_STRICT
from .blocks import DateTimeRangeType, LatLonEleType
class SubCollectionInfoType(Serializable):
    """
    Information describing a sub-division (site/scene) of the overall data
    collection.
    """

    _fields = (
        'Name', 'SiteName', 'SiteNumber', 'SceneNumber', 'Description',
        'Duration', 'SiteCenterLocation', 'SceneContentDescription',
        'SiteBackgroundType')
    _required = ('Name', 'SiteCenterLocation', 'SceneContentDescription')
    # descriptors
    Name = StringDescriptor(
        'Name', _required,
        docstring="Name of the subcollection.")  # type: str
    SiteName = StringDescriptor(
        'SiteName', _required,
        docstring="Name of the subcollection site location.")  # type: Optional[str]
    SiteNumber = StringDescriptor(
        'SiteNumber', _required,
        docstring="Site number of the subcollection.")  # type: Optional[str]
    SceneNumber = StringDescriptor(
        'SceneNumber', _required,
        docstring="Scene number of the subcollection.")  # type: Optional[str]
    Description = StringDescriptor(
        'Description', _required,
        docstring="Description of the subcollection (e.g., Main array).")  # type: Optional[str]
    Duration = SerializableDescriptor(
        'Duration', DateTimeRangeType, _required, strict=DEFAULT_STRICT,
        docstring="Begin and end dates of the subcollection.")  # type: Optional[DateTimeRangeType]
    SiteCenterLocation = SerializableDescriptor(
        'SiteCenterLocation', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring="Location of the center of the collection site.")  # type: LatLonEleType
    SceneContentDescription = StringDescriptor(
        'SceneContentDescription', _required, default_value="",
        docstring="Description of the general scene contents.")  # type: str
    SiteBackgroundType = StringDescriptor(
        'SiteBackgroundType', _required,
        docstring="Description of the background.")  # type: Optional[str]

    def __init__(self, Name=None, SiteName=None, SiteNumber=None,
                 SceneNumber=None, Description=None, Duration=None,
                 SiteCenterLocation=None, SceneContentDescription=None,
                 SiteBackgroundType=None, **kwargs):
        """
        Parameters
        ----------
        Name : None|str
        SiteName : None|str
        SiteNumber : None|str
        SceneNumber : None|str
        Description : None|str
        Duration : None|DateTimeRangeType|list|tuple
        SiteCenterLocation : LatLonEleType|numpy.ndarray|list|tuple
        SceneContentDescription : None|str
        SiteBackgroundType : None|str
        kwargs
            Other keyword arguments
        """
        # retain any xml namespace details handed through by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        # assign every field through its descriptor
        field_values = (
            ('Name', Name), ('SiteName', SiteName), ('SiteNumber', SiteNumber),
            ('SceneNumber', SceneNumber), ('Description', Description),
            ('Duration', Duration), ('SiteCenterLocation', SiteCenterLocation),
            ('SceneContentDescription', SceneContentDescription),
            ('SiteBackgroundType', SiteBackgroundType))
        for attribute, value in field_values:
            setattr(self, attribute, value)
        super(SubCollectionInfoType, self).__init__(**kwargs)
| 3,552 | 40.313953 | 99 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/FiducialInfo.py | """
Definition for the FiducialInfo NGA modified RDE/AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
from typing import Optional, List
import logging
import numpy
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import IntegerDescriptor, SerializableDescriptor, \
SerializableListDescriptor, StringDescriptor
from sarpy.io.complex.sicd_elements.blocks import RowColType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from .base import DEFAULT_STRICT
from .blocks import LatLonEleType, RangeCrossRangeType, \
ProjectionPerturbationType, LabelSourceType
logger = logging.getLogger(__name__)
_no_projection_text = 'This sicd does not permit projection,\n\t' \
'so the image location can not be inferred'
class ImageLocationType(Serializable):
    """
    The pixel-space location of an annotated point.
    """

    _fields = ('CenterPixel', )
    _required = _fields
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='The pixel location of the center of the object')  # type: RowColType

    def __init__(self, CenterPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : RowColType|numpy.ndarray|list|tuple
        kwargs
            Other keyword arguments
        """
        # retain any xml namespace details handed through by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.CenterPixel = CenterPixel
        super(ImageLocationType, self).__init__(**kwargs)

    @classmethod
    def from_geolocation(cls, geo_location, the_structure):
        """
        Construct from the corresponding Geolocation using the sicd
        projection model.

        Parameters
        ----------
        geo_location : GeoLocationType
        the_structure : SICDType|SIDDType

        Returns
        -------
        None|ImageLocationType
            None when the input is unpopulated, the structure does not permit
            projection, or the projection does not converge.
        """
        if geo_location is None or geo_location.CenterPixel is None:
            return None
        if not the_structure.can_project_coordinates():
            logger.warning(_no_projection_text)
            return None
        # project from geodetic coordinates into full image pixel coordinates
        full_image_coords, _, _ = the_structure.project_ground_to_image_geo(
            geo_location.CenterPixel.get_array(dtype='float64'), ordering='latlong')
        if numpy.any(numpy.isnan(full_image_coords)):
            # the projection failed to produce usable coordinates
            return None
        # a SICD may be a chip - shift by the first row/column of the chip
        if isinstance(the_structure, SICDType):
            chip_offset = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            chip_offset = numpy.zeros((2, ), dtype='float64')
        return ImageLocationType(CenterPixel=full_image_coords - chip_offset)
class GeoLocationType(Serializable):
    """
    The geographic (physical) location of an annotated point.
    """

    _fields = ('CenterPixel', )
    _required = _fields
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='The physical location of the center of the object')  # type: LatLonEleType

    def __init__(self, CenterPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : LatLonEleType|numpy.ndarray|list|tuple
        kwargs
            Other keyword arguments
        """
        # retain any xml namespace details handed through by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.CenterPixel = CenterPixel
        super(GeoLocationType, self).__init__(**kwargs)

    # noinspection PyUnusedLocal
    @classmethod
    def from_image_location(cls, image_location, the_structure, projection_type='HAE', **proj_kwargs):
        """
        Construct the geographical location from the image location via
        projection using the SICD model.

        .. Note::
            This assumes that the image coordinates are with respect to the given
            image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
            which will be added here.

        Parameters
        ----------
        image_location : ImageLocationType
        the_structure : SICDType|SIDDType
        projection_type : str
            The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
            requires configuration for the DEM pathway described in
            :func:`sarpy.geometry.point_projection.image_to_ground_dem`.
        proj_kwargs
            The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.

        Returns
        -------
        None|GeoLocationType
            Coordinates may be populated as `NaN` if projection fails.
        """
        if image_location is None or image_location.CenterPixel is None:
            return None
        if not the_structure.can_project_coordinates():
            logger.warning(_no_projection_text)
            return None
        # make sure the projection helper is defined, for the sake of efficiency
        the_structure.define_coa_projection(override=False)
        # a SICD may be a chip - shift by the first row/column of the chip
        if isinstance(the_structure, SICDType):
            chip_offset = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            chip_offset = numpy.zeros((2, ), dtype='float64')
        full_image_coords = image_location.CenterPixel.get_array(dtype='float64') + chip_offset
        lat_lon_ele = the_structure.project_image_to_ground_geo(
            full_image_coords, ordering='latlong', projection_type=projection_type, **proj_kwargs)
        return GeoLocationType(CenterPixel=lat_lon_ele)
class PhysicalLocationType(Serializable):
    """
    Wrapper carrying a physical (slant or ground plane) image location.
    """

    _fields = ('Physical', )
    _required = _fields
    # descriptors
    Physical = SerializableDescriptor(
        'Physical', ImageLocationType, _required, strict=DEFAULT_STRICT,
    )  # type: ImageLocationType

    def __init__(self, Physical=None, **kwargs):
        """
        Parameters
        ----------
        Physical : ImageLocationType
        kwargs
            Other keyword arguments
        """
        # retain any xml namespace details handed through by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.Physical = Physical
        super(PhysicalLocationType, self).__init__(**kwargs)
class TheFiducialType(Serializable):
_fields = (
'Name', 'SerialNumber', 'FiducialType', 'DatasetFiducialNumber',
'ImageLocation', 'GeoLocation',
'IPRWidth3dB', 'IPRWidth18dB', 'IPRWidth3dB18dBRatio',
'PeakSideLobeRatio', 'IntegratedSideLobeRatio',
'SlantPlane', 'GroundPlane', 'ProjectionPerturbation')
_required = (
'FiducialType', 'ImageLocation', 'GeoLocation')
# descriptors
Name = StringDescriptor(
'Name', _required, strict=DEFAULT_STRICT,
docstring='Name of the fiducial.') # type: Optional[str]
SerialNumber = StringDescriptor(
'SerialNumber', _required, strict=DEFAULT_STRICT,
docstring='The serial number of the fiducial') # type: Optional[str]
FiducialType = StringDescriptor(
'FiducialType',
_required, strict=DEFAULT_STRICT,
docstring='The type of fiducial') # type: str
DatasetFiducialNumber = IntegerDescriptor(
'DatasetFiducialNumber', _required,
docstring='Unique number of the fiducial within the selected dataset, '
'defined by the RDE system') # type: Optional[int]
ImageLocation = SerializableDescriptor(
'ImageLocation', ImageLocationType, _required,
docstring='Center of the fiducial in the image'
) # type: Optional[ImageLocationType]
GeoLocation = SerializableDescriptor(
'GeoLocation', GeoLocationType, _required,
docstring='Real physical location of the fiducial'
) # type: Optional[GeoLocationType]
IPRWidth3dB = SerializableDescriptor(
'IPRWidth3dB', RangeCrossRangeType, _required,
docstring='The 3 dB impulse response width, in meters'
) # type: Optional[RangeCrossRangeType]
IPRWidth18dB = SerializableDescriptor(
'IPRWidth18dB', RangeCrossRangeType, _required,
docstring='The 18 dB impulse response width, in meters'
) # type: Optional[RangeCrossRangeType]
IPRWidth3dB18dBRatio = SerializableDescriptor(
'IPRWidth3dB18dBRatio', RangeCrossRangeType, _required,
docstring='Ratio of the 3 dB to 18 dB system impulse response width'
) # type: Optional[RangeCrossRangeType]
PeakSideLobeRatio = SerializableDescriptor(
'PeakSideLobeRatio', RangeCrossRangeType, _required,
docstring='Ratio of the peak sidelobe intensity to the peak mainlobe intensity, '
'in dB') # type: Optional[RangeCrossRangeType]
IntegratedSideLobeRatio = SerializableDescriptor(
'IntegratedSideLobeRatio', RangeCrossRangeType, _required,
docstring='Ratio of all the energies in the sidelobes of the '
'system impulse response to the energy in the mainlobe, '
'in dB') # type: Optional[RangeCrossRangeType]
SlantPlane = SerializableDescriptor(
'SlantPlane', PhysicalLocationType, _required,
docstring='Center of the object in the slant plane'
) # type: Optional[PhysicalLocationType]
GroundPlane = SerializableDescriptor(
'GroundPlane', PhysicalLocationType, _required,
docstring='Center of the object in the ground plane'
) # type: Optional[PhysicalLocationType]
ProjectionPerturbation = SerializableDescriptor(
'ProjectionPerturbation', ProjectionPerturbationType, _required,
docstring='') # type: Optional[ProjectionPerturbationType]
def __init__(self, Name=None, SerialNumber=None, FiducialType=None,
DatasetFiducialNumber=None, ImageLocation=None, GeoLocation=None,
IPRWidth3dB=None, IPRWidth18dB=None, IPRWidth3dB18dBRatio=None,
PeakSideLobeRatio=None, IntegratedSideLobeRatio=None,
SlantPlane=None, GroundPlane=None, ProjectionPerturbation=None,
**kwargs):
"""
Parameters
----------
Name : str
SerialNumber : None|str
FiducialType : str
DatasetFiducialNumber : None|int
ImageLocation : ImageLocationType
GeoLocation : GeoLocationType
IPRWidth3dB : None|RangeCrossRangeType|numpy.ndarray|list|tuple
IPRWidth18dB : None|RangeCrossRangeType|numpy.ndarray|list|tuple
IPRWidth3dB18dBRatio : None|RangeCrossRangeType|numpy.ndarray|list|tuple
PeakSideLobeRatio : None|RangeCrossRangeType|numpy.ndarray|list|tuple
IntegratedSideLobeRatio : None|RangeCrossRangeType|numpy.ndarray|list|tuple
SlantPlane : None|PhysicalLocationType
GroundPlane : None|PhysicalLocationType
ProjectionPerturbation : None|ProjectionPerturbationType
kwargs
Other keyword arguments
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Name = Name
self.SerialNumber = SerialNumber
self.FiducialType = FiducialType
self.DatasetFiducialNumber = DatasetFiducialNumber
self.ImageLocation = ImageLocation
self.GeoLocation = GeoLocation
self.IPRWidth3dB = IPRWidth3dB
self.IPRWidth18dB = IPRWidth18dB
self.IPRWidth3dB18dBRatio = IPRWidth3dB18dBRatio
self.PeakSideLobeRatio = PeakSideLobeRatio
self.IntegratedSideLobeRatio = IntegratedSideLobeRatio
self.SlantPlane = SlantPlane
self.GroundPlane = GroundPlane
self.ProjectionPerturbation = ProjectionPerturbation
super(TheFiducialType, self).__init__(**kwargs)
    def set_image_location_from_sicd(self, sicd, populate_in_periphery=False):
        """
        Set the image location information with respect to the given SICD.

        Only populates when neither `ImageLocation` nor `SlantPlane` is already
        set, and requires `GeoLocation` plus a usable SICD projection model.

        Parameters
        ----------
        sicd : SICDType
        populate_in_periphery : bool
            Populate the location even when it falls just outside the image extent?

        Returns
        -------
        int
            -1 - insufficient metadata to proceed
            0 - nothing to be done
            1 - successful
            2 - object in image periphery, not populating
            3 - object not in image field
        """
        if self.ImageLocation is not None or self.SlantPlane is not None:
            # no need to infer anything, it's already populated
            return 0
        if self.GeoLocation is None:
            logger.warning(
                'GeoLocation is not populated,\n\t'
                'so the image location can not be inferred')
            return -1
        if not sicd.can_project_coordinates():
            logger.warning(_no_projection_text)
            return -1
        # project the geographic location into pixel coordinates
        image_location = ImageLocationType.from_geolocation(self.GeoLocation, sicd)
        # check bounding information
        rows = sicd.ImageData.NumRows
        cols = sicd.ImageData.NumCols
        center_pixel = image_location.CenterPixel.get_array(dtype='float64')
        if (0 < center_pixel[0] < rows - 1) and (0 < center_pixel[1] < cols - 1):
            # strictly interior to the image grid
            placement = 1
        elif (-3 < center_pixel[0] < rows + 2) and (-3 < center_pixel[1] < cols + 2):
            # within a couple of pixels of the image extent - the "periphery"
            placement = 2
        else:
            placement = 3
        if placement == 3 or (placement == 2 and not populate_in_periphery):
            return placement
        self.ImageLocation = image_location
        # record the same pixel location as the slant-plane physical location
        self.SlantPlane = PhysicalLocationType(Physical=image_location)
        return placement
def set_geo_location_from_sicd(self, sicd, projection_type='HAE', **proj_kwargs):
"""
Set the geographical location information with respect to the given SICD,
assuming that the image coordinates are populated.
.. Note::
This assumes that the image coordinates are with respect to the given
image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
which will be added here.
Parameters
----------
sicd : SICDType
projection_type : str
The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
requires configuration for the DEM pathway described in
:func:`sarpy.geometry.point_projection.image_to_ground_dem`.
proj_kwargs
The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.
"""
if self.GeoLocation is not None:
# no need to infer anything, it's already populated
return
if self.ImageLocation is None:
logger.warning(
'ImageLocation is not populated,\n\t'
'so the geographical location can not be inferred')
return
if not sicd.can_project_coordinates():
logger.warning(_no_projection_text)
return
self.GeoLocation = GeoLocationType.from_image_location(
self.ImageLocation, sicd, projection_type=projection_type, **proj_kwargs)
class FiducialInfoType(Serializable):
    """The collection of fiducials, with summary counts and label provenance."""
    _fields = (
        'NumberOfFiducialsInImage', 'NumberOfFiducialsInScene', 'LabelSource', 'Fiducials')
    _required = (
        'NumberOfFiducialsInImage', 'NumberOfFiducialsInScene', 'LabelSource', 'Fiducials')
    _collections_tags = {'Fiducials': {'array': False, 'child_tag': 'Fiducial'}}
    # descriptors
    NumberOfFiducialsInImage = IntegerDescriptor(
        'NumberOfFiducialsInImage', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the image.')  # type: int
    NumberOfFiducialsInScene = IntegerDescriptor(
        'NumberOfFiducialsInScene', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the scene.')  # type: int
    LabelSource = SerializableDescriptor(
        'LabelSource', LabelSourceType, _required, strict=DEFAULT_STRICT,
        docstring='The source of the labels')  # type: LabelSourceType
    Fiducials = SerializableListDescriptor(
        'Fiducials', TheFiducialType, _collections_tags, _required, strict=DEFAULT_STRICT,
        docstring='The object collection')  # type: List[TheFiducialType]
    def __init__(self, NumberOfFiducialsInImage=None, NumberOfFiducialsInScene=None,
                 LabelSource=None, Fiducials=None, **kwargs):
        """
        Parameters
        ----------
        NumberOfFiducialsInImage : int
        NumberOfFiducialsInScene : int
        LabelSource : LabelSourceType
        Fiducials : None|List[TheFiducialType]
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.NumberOfFiducialsInImage = NumberOfFiducialsInImage
        self.NumberOfFiducialsInScene = NumberOfFiducialsInScene
        self.LabelSource = LabelSource
        self.Fiducials = Fiducials
        super(FiducialInfoType, self).__init__(**kwargs)
    def set_image_location_from_sicd(
            self, sicd, populate_in_periphery=False, include_out_of_range=False):
        """
        Set the image location information with respect to the given SICD,
        assuming that the physical coordinates are populated. The `NumberOfFiducialsInImage`
        will be set, and `NumberOfFiducialsInScene` will be left unchanged.

        Parameters
        ----------
        sicd : SICDType
        populate_in_periphery : bool
            Populate image information for objects on the periphery?
        include_out_of_range : bool
            Include the objects which are out of range (with no image location information)?
        """
        def update_fiducial(temp_fid, in_image_count):
            # delegates to the per-fiducial method; status 1 = in image,
            # 2 = periphery, 3 = out of field, -1 = insufficient metadata
            status = temp_fid.set_image_location_from_sicd(sicd, populate_in_periphery=populate_in_periphery)
            use_fid = False
            if status == 0:
                # 0 means the fiducial was already populated - unexpected here
                raise ValueError('Fiducial already has image details set')
            if status == 1 or (status == 2 and populate_in_periphery):
                use_fid = True
                in_image_count += 1
            return use_fid, in_image_count
        fid_in_image = 0
        if include_out_of_range:
            # the fiducials list is just modified in place
            for the_fid in self.Fiducials:
                _, fid_in_image = update_fiducial(the_fid, fid_in_image)
        else:
            # rebuild the list, keeping only fiducials that landed in the image
            fiducials = []
            for the_fid in self.Fiducials:
                use_this_fid, fid_in_image = update_fiducial(the_fid, fid_in_image)
                if use_this_fid:
                    fiducials.append(the_fid)
            self.Fiducials = fiducials
        self.NumberOfFiducialsInImage = fid_in_image
| 19,093 | 38.862213 | 109 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/annotation/afrl_rde_elements/blocks.py | """
Common use elements for the NGA modified RDE/AFRL labeling definition
"""
__classification__ = "UNCLASSIFIED"
__authors__ = "Thomas McCullough"
import numpy
from datetime import date, datetime
from typing import Optional
from sarpy.io.xml.base import Serializable, Arrayable
from sarpy.io.xml.descriptors import DateTimeDescriptor, FloatDescriptor, \
SerializableDescriptor, StringEnumDescriptor, StringDescriptor
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.complex.sicd_elements.blocks import XYZType
from .base import DEFAULT_STRICT
class DateRangeType(Serializable, Arrayable):
    """
    A range of dates with resolution of 1 day
    """
    _fields = ('Begin', 'End')
    _required = _fields
    # descriptors - stored as numpy.datetime64 with day ('D') resolution
    Begin = DateTimeDescriptor(
        'Begin', _required, strict=DEFAULT_STRICT, numpy_datetime_units='D',
        docstring="Begin date of the data collection.")  # type: Optional[numpy.datetime64]
    End = DateTimeDescriptor(
        'End', _required, strict=DEFAULT_STRICT, numpy_datetime_units='D',
        docstring="End date of the data collection.")  # type: Optional[numpy.datetime64]
    def __init__(self, Begin=None, End=None, **kwargs):
        """
        Parameters
        ----------
        Begin : None|numpy.datetime64|str|datetime|date
        End : None|numpy.datetime64|str|datetime|date
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Begin = Begin
        self.End = End
        super(DateRangeType, self).__init__(**kwargs)
    def get_array(self, dtype='datetime64[D]'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype
            data type of the return

        Returns
        -------
        numpy.ndarray
            data array of the form [Begin, End]
        """
        return numpy.array([self.Begin, self.End], dtype=dtype)
    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Begin, End]

        Returns
        -------
        DateRangeType
        """
        if array is None:
            return None
        if isinstance(array, (numpy.ndarray, list, tuple)):
            if len(array) < 2:
                raise ValueError('Expected array to be of length 2, and received {}'.format(array))
            return cls(Begin=array[0], End=array[1])
        raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
class DateTimeRangeType(Serializable, Arrayable):
    """
    A range of date/times with resolution of 1 second
    """
    _fields = ('Begin', 'End')
    _required = _fields
    # descriptors - stored as numpy.datetime64 with second ('s') resolution
    Begin = DateTimeDescriptor(
        'Begin', _required, strict=DEFAULT_STRICT, numpy_datetime_units='s',
        docstring="Begin date/time of the data collection.")  # type: Optional[numpy.datetime64]
    End = DateTimeDescriptor(
        'End', _required, strict=DEFAULT_STRICT, numpy_datetime_units='s',
        docstring="End date/time of the data collection.")  # type: Optional[numpy.datetime64]
    def __init__(self, Begin=None, End=None, **kwargs):
        """
        Parameters
        ----------
        Begin : None|numpy.datetime64|str|datetime|date
        End : None|numpy.datetime64|str|datetime|date
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Begin = Begin
        self.End = End
        super(DateTimeRangeType, self).__init__(**kwargs)
    # NOTE(review): the default dtype truncates to day resolution even though the
    # fields are stored with second resolution - looks like a copy-paste from
    # DateRangeType; confirm intent before changing the default
    def get_array(self, dtype='datetime64[D]'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype
            data type of the return

        Returns
        -------
        numpy.ndarray
            data array of the form [Begin, End]
        """
        return numpy.array([self.Begin, self.End], dtype=dtype)
    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Begin, End]

        Returns
        -------
        DateTimeRangeType
        """
        if array is None:
            return None
        if isinstance(array, (numpy.ndarray, list, tuple)):
            if len(array) < 2:
                raise ValueError('Expected array to be of length 2, and received {}'.format(array))
            return cls(Begin=array[0], End=array[1])
        raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
class RangeCrossRangeType(Serializable, Arrayable):
    """
    A range and cross range attribute container
    """
    _fields = ('Range', 'CrossRange')
    _required = _fields
    # serialize floats with full double precision
    _numeric_format = {key: '0.17G' for key in _fields}
    # descriptors
    Range = FloatDescriptor(
        'Range', _required, strict=True, docstring='The Range attribute.')  # type: float
    CrossRange = FloatDescriptor(
        'CrossRange', _required, strict=True, docstring='The Cross Range attribute.')  # type: float
    def __init__(self, Range=None, CrossRange=None, **kwargs):
        """
        Parameters
        ----------
        Range : float
        CrossRange : float
        kwargs
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Range, self.CrossRange = Range, CrossRange
        super(RangeCrossRangeType, self).__init__(**kwargs)
    def get_array(self, dtype='float64'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
            array of the form [Range, CrossRange]
        """
        return numpy.array([self.Range, self.CrossRange], dtype=dtype)
    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Range, CrossRange]

        Returns
        -------
        RangeCrossRangeType
        """
        if array is None:
            return None
        if isinstance(array, (numpy.ndarray, list, tuple)):
            if len(array) < 2:
                raise ValueError('Expected array to be of length 2, and received {}'.format(array))
            return cls(Range=array[0], CrossRange=array[1])
        raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
class RowColDoubleType(Serializable, Arrayable):
    """A (row, column) pair with real-valued (double precision) entries."""
    _fields = ('Row', 'Col')
    _required = _fields
    # serialize floats with full double precision
    _numeric_format = {key: '0.17G' for key in _fields}
    # Descriptors
    Row = FloatDescriptor(
        'Row', _required, strict=True, docstring='The Row attribute.')  # type: float
    Col = FloatDescriptor(
        'Col', _required, strict=True, docstring='The Column attribute.')  # type: float
    def __init__(self, Row=None, Col=None, **kwargs):
        """
        Parameters
        ----------
        Row : float
        Col : float
        kwargs
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Row, self.Col = Row, Col
        super(RowColDoubleType, self).__init__(**kwargs)
    def get_array(self, dtype='float64'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
            array of the form [Row, Col]
        """
        return numpy.array([self.Row, self.Col], dtype=dtype)
    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Row, Col]

        Returns
        -------
        RowColDoubleType
        """
        if array is None:
            return None
        if isinstance(array, (numpy.ndarray, list, tuple)):
            if len(array) < 2:
                raise ValueError('Expected array to be of length 2, and received {}'.format(array))
            return cls(Row=array[0], Col=array[1])
        raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
class LatLonEleType(Serializable, Arrayable):
    """A three-dimensional geographic point in WGS-84 coordinates."""
    _fields = ('Lat', 'Lon', 'Ele')
    _required = _fields
    # serialize floats with full double precision
    _numeric_format = {'Lat': '0.17G', 'Lon': '0.17G', 'Ele': '0.17G'}
    # descriptors
    Lat = FloatDescriptor(
        'Lat', _required, strict=True,
        docstring='The latitude attribute. Assumed to be WGS-84 coordinates.'
    )  # type: float
    Lon = FloatDescriptor(
        'Lon', _required, strict=True,
        docstring='The longitude attribute. Assumed to be WGS-84 coordinates.'
    )  # type: float
    Ele = FloatDescriptor(
        'Ele', _required, strict=True,
        docstring='The Height Above Ellipsoid (in meters) attribute. '
                  'Assumed to be WGS-84 coordinates.')  # type: float
    def __init__(self, Lat=None, Lon=None, Ele=None, **kwargs):
        """
        Parameters
        ----------
        Lat : float
        Lon : float
        Ele : float
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Lat = Lat
        self.Lon = Lon
        self.Ele = Ele
        # previously this re-passed Lat and Lon (but not Ele) into the parent
        # constructor; all three fields are already assigned above, so forward
        # only the remaining kwargs, consistent with the sibling classes here
        super(LatLonEleType, self).__init__(**kwargs)
    def get_array(self, dtype=numpy.float64):
        """
        Gets an array representation of the data.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            data type of the return

        Returns
        -------
        numpy.ndarray
            data array of the form [Lat, Lon, Ele]
        """
        return numpy.array([self.Lat, self.Lon, self.Ele], dtype=dtype)
    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Lat, Lon, Ele]

        Returns
        -------
        LatLonEleType
        """
        if array is None:
            return None
        if isinstance(array, (numpy.ndarray, list, tuple)):
            if len(array) < 3:
                raise ValueError('Expected array to be of length 3, and received {}'.format(array))
            return cls(Lat=array[0], Lon=array[1], Ele=array[2])
        raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
class ProjectionPerturbationType(Serializable):
    """
    Basic information required for SICD/SIDD projection model perturbation.
    """
    _fields = ('CoordinateFrame', 'DeltaArp', 'DeltaVarp', 'DeltaRange')
    _required = ('CoordinateFrame', )
    # NOTE(review): 'Lat' is not a field of this class - this entry looks like a
    # copy-paste leftover; confirm it is unused before removing
    _numeric_format = {'Lat': '0.17G', }
    CoordinateFrame = StringEnumDescriptor(
        'CoordinateFrame', {'ECF', 'RIC_ECI', 'RIC_ECF'}, _required)  # type: str
    DeltaArp = SerializableDescriptor(
        'DeltaArp', XYZType, _required)  # type: XYZType
    DeltaVarp = SerializableDescriptor(
        'DeltaVarp', XYZType, _required)  # type: XYZType
    DeltaRange = FloatDescriptor(
        'DeltaRange', _required)  # type: float
    def __init__(self, CoordinateFrame=None, DeltaArp=None, DeltaVarp=None, DeltaRange=None, **kwargs):
        """
        Parameters
        ----------
        CoordinateFrame : str
        DeltaArp : None|XYZType|numpy.ndarray|list|tuple
        DeltaVarp : None|XYZType|numpy.ndarray|list|tuple
        DeltaRange : None|float
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.CoordinateFrame = CoordinateFrame
        self.DeltaArp = DeltaArp
        self.DeltaVarp = DeltaVarp
        self.DeltaRange = DeltaRange
        super(ProjectionPerturbationType, self).__init__(**kwargs)
    def set_coa_projection(self, structure):
        """
        Sets the sicd or sidd coa_projection property, as appropriate.

        Parameters
        ----------
        structure : SICDType|SIDDType1|SIDDType2
        """
        if not isinstance(structure, (SICDType, SIDDType1, SIDDType2)):
            raise TypeError('Requires input of type SICDType or SIDDType, got {}'.format(type(structure)))
        # convert the stored perturbation elements to arrays and override any
        # previously defined projection on the structure
        structure.define_coa_projection(
            delta_arp=None if self.DeltaArp is None else self.DeltaArp.get_array(dtype='float64'),
            delta_varp=None if self.DeltaVarp is None else self.DeltaVarp.get_array(dtype='float64'),
            range_bias=self.DeltaRange,
            adj_params_frame=self.CoordinateFrame,
            override=True)
class LabelSourceType(Serializable):
    """Provenance of the labeling effort - who/what produced the labels."""
    _fields = ('SourceType', 'SourceID', 'Description')
    _required = ('SourceType', )
    SourceType = StringEnumDescriptor(
        'SourceType', {
            'Ground Truth', 'Analyst Truth', 'Algorithm Truth', 'Other', 'Unknown'},
        _required,
        docstring='The source type of the labeling effort')  # type: str
    SourceID = StringDescriptor(
        'SourceID', _required,
        docstring='The "ID" of the labeling source. '
                  'This should be populated following program guidance.')  # type: Optional[str]
    Description = StringDescriptor(
        'Description', _required,
        docstring='A description of the labeling source')  # type: Optional[str]
    def __init__(self, SourceType='Unknown', SourceID=None, Description=None, **kwargs):
        """
        Parameters
        ----------
        SourceType : str
        SourceID : None|str
        Description : None|str
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.SourceType = SourceType
        self.SourceID = SourceID
        self.Description = Description
        super(LabelSourceType, self).__init__(**kwargs)
sarpy | sarpy-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath('..'))  # set path for project import
from sarpy import __about__ as parameters  # fetch our relevant project details
# -- Project information -----------------------------------------------------
# version/release/copyright/author all come from the package metadata, so the
# docs cannot drift from the installed project
project = 'sarpy'
version = parameters.__version__  # The full version, including alpha/beta/rc tags
release = parameters.__version__
copyright = parameters.__copyright__
author = parameters.__author__
html_logo = 'nga_logo.jpeg'  # NGA logo shown in the rendered docs
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# control the order of elements shown by autodoc
autodoc_member_order = 'bysource'
| 2,007 | 36.185185 | 82 | py |
Tiny-NewsRec | Tiny-NewsRec-main/split_file.py | import random
from tqdm import tqdm
import tensorflow as tf
def get_sample(all_element, num_sample):
    """
    Randomly draw `num_sample` items from `all_element`.

    When `num_sample` exceeds the population size, the population is tiled
    enough times that sampling without replacement still succeeds (so elements
    may repeat in the result).

    Parameters
    ----------
    all_element : list
        The population to sample from; must be non-empty.
    num_sample : int
        The number of samples to draw.

    Returns
    -------
    list

    Raises
    ------
    ValueError
        If `all_element` is empty (previously this failed with an opaque
        ZeroDivisionError from the tiling arithmetic).
    """
    if not all_element:
        raise ValueError('cannot sample from an empty population')
    if num_sample > len(all_element):
        # tile the population so random.sample has enough distinct slots
        tiled = all_element * (num_sample // len(all_element) + 1)
        return random.sample(tiled, num_sample)
    return random.sample(all_element, num_sample)
# number of output shards
N = 4
behaviors = []
with open('./MIND/MINDlarge_train/behaviors.tsv') as f:
    for line in tqdm(f):
        # behaviors.tsv fields: impression id, user id, time, click history,
        # and the space-separated 'newsid-label' impression list
        iid, uid, time, history, imp = line.strip().split('\t')
        impressions = [x.split('-') for x in imp.split(' ')]
        pos, neg = [], []
        for news_ID, label in impressions:
            if int(label) == 0:
                neg.append(news_ID)
            elif int(label) == 1:
                pos.append(news_ID)
        if len(pos) == 0:
            # skip impressions with no clicked news
            continue
        for pos_id in pos:
            # pair each positive with 4 sampled negatives
            # NOTE(review): assumes every kept impression also has negatives;
            # get_sample fails on an empty negative list - confirm the data
            neg_candidate = get_sample(neg, 4)
            neg_str = ' '.join(neg_candidate)
            new_line = '\t'.join([iid, uid, time, history, pos_id, neg_str]) + '\n'
            behaviors.append(new_line)
print(len(behaviors))
random.shuffle(behaviors)
# deal the shuffled samples round-robin into N shard files
split_behaviors = [[] for _ in range(N)]
for i, line in enumerate(behaviors):
    split_behaviors[i % N].append(line)
for i in range(N):
    with tf.io.gfile.GFile(f'./MIND/MINDlarge_train/behaviors_np4_{i}.tsv', 'w') as f:
        for line in split_behaviors[i]:
            f.write(line)
| 1,365 | 30.045455 | 92 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/dataloader.py | import sys
import traceback
import logging
import random
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import torch
from torch.utils.data import IterableDataset
from streaming import StreamSampler
def news_sample(news, ratio):
    """Draw `ratio` entries from `news`, zero-padding when too few exist."""
    if len(news) >= ratio:
        return random.sample(news, ratio)
    return news + [0] * (ratio - len(news))
class DataLoaderTrain(IterableDataset):
    """
    Streaming training dataset: pulls behavior lines through `StreamSampler`,
    builds (user history, candidate news, label) batches, and attaches the
    pre-computed teacher embeddings for each teacher model.

    When `enable_prefetch` is set, batches are produced on a background thread
    and handed to the consumer through a bounded queue.
    """
    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_combined,
                 teacher_embs,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        """Store configuration; no I/O happens until iteration starts."""
        self.data_dir = data_dir
        self.filename_pat = filename_pat
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size
        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx
        self.sampler = None
        self.shuffle_buffer_size = args.shuffle_buffer_size
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        self.epoch = -1
        self.num_teachers = args.num_teachers
        self.teacher_embs = teacher_embs
        self.news_combined = news_combined
        self.news_index = news_index
        self.word_dict = word_dict
    def start(self):
        """Create this epoch's StreamSampler and prime its iterator (synchronous path)."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_buffer_size=self.shuffle_buffer_size,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()
    def trans_to_nindex(self, nids):
        """Map news ids to matrix row indices; unknown ids fall back to row 0 (padding)."""
        return [self.news_index[i] if i in self.news_index else 0 for i in nids]
    def pad_to_fix_len(self, x, fix_length, padding_front=True, padding_value=0):
        """
        Truncate/pad `x` to exactly `fix_length` entries, keeping the most
        recent (last) elements; also returns a 0/1 mask marking real entries.
        """
        if padding_front:
            pad_x = [padding_value] * (fix_length-len(x)) + x[-fix_length:]
            mask = [0] * (fix_length-len(x)) + [1] * min(fix_length, len(x))
        else:
            pad_x = x[-fix_length:] + [padding_value]*(fix_length-len(x))
            mask = [1] * min(fix_length, len(x)) + [0] * (fix_length-len(x))
        return pad_x, mask
    def _produce(self):
        """Background-thread producer: stream batches, process, enqueue (prefetch path)."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
        except:
            # report the failure in the worker thread, stop the pool, re-raise
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise
    def start_async(self):
        """Spin up the single-thread producer feeding a bounded queue of 10 batches."""
        self.aval_count = 0
        self.stopped = False
        self.outputs = Queue(10)
        self.pool = ThreadPoolExecutor(1)
        self.pool.submit(self._produce)
    def _process(self, batch):
        """
        Turn raw behavior lines into a training batch.

        Each line is tab-separated; field 3 is the click history, field 4 the
        positive news id(s) and field 5 the sampled negatives. The positive is
        inserted at a random position among the negatives, and that position
        becomes the classification label.
        """
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
        teacher_history_batch, teacher_candidate_batch = [[] for _ in range(
            self.num_teachers)], [[] for _ in range(self.num_teachers)]
        for line in batch:
            click_docs = line[3].split()
            sess_pos = line[4].split()
            sess_neg = line[5].split()
            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_combined[click_docs]
            pos = self.trans_to_nindex(sess_pos)
            neg = self.trans_to_nindex(sess_neg)
            # position at which the positive(s) are inserted among the negatives
            label = random.randint(0, self.npratio)
            sample_news = neg[:label] + pos + neg[label:]
            news_feature = self.news_combined[sample_news]
            # gather the matching teacher embeddings for distillation
            for i in range(self.num_teachers):
                teacher_history_batch[i].append(
                    self.teacher_embs[i][click_docs])
                teacher_candidate_batch[i].append(
                    self.teacher_embs[i][sample_news])
            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(label)
        if self.enable_gpu:
            user_feature_batch = torch.LongTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
            news_feature_batch = torch.LongTensor(news_feature_batch).cuda()
            label_batch = torch.LongTensor(label_batch).cuda()
            for i in range(self.num_teachers):
                teacher_history_batch[i] = torch.FloatTensor(
                    teacher_history_batch[i]).cuda()
                teacher_candidate_batch[i] = torch.FloatTensor(
                    teacher_candidate_batch[i]).cuda()
        else:
            user_feature_batch = torch.LongTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)
            news_feature_batch = torch.LongTensor(news_feature_batch)
            label_batch = torch.LongTensor(label_batch)
            for i in range(self.num_teachers):
                teacher_history_batch[i] = torch.FloatTensor(
                    teacher_history_batch[i])
                teacher_candidate_batch[i] = torch.FloatTensor(
                    teacher_candidate_batch[i])
        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch, teacher_history_batch, teacher_candidate_batch
    def __iter__(self):
        """Implement IterableDataset method to provide data iterator."""
        logging.info("DataLoader __iter__()")
        if self.enable_prefetch:
            self.join()
            self.start_async()
        else:
            self.start()
        return self
    def __next__(self):
        """Return the next batch, from the queue (prefetch) or synchronously."""
        if self.sampler and self.sampler.reach_end() and self.aval_count == 0:
            raise StopIteration
        if self.enable_prefetch:
            next_batch = self.outputs.get()
            self.outputs.task_done()
            self.aval_count -= 1
        else:
            next_batch = self._process(self.sampler.__next__())
        return next_batch
    def join(self):
        """Stop the producer, drain the queue, and shut the thread pool down."""
        self.stopped = True
        if self.sampler:
            if self.enable_prefetch:
                while self.outputs.qsize() > 0:
                    self.outputs.get()
                    self.outputs.task_done()
                self.outputs.join()
                self.pool.shutdown(wait=True)
                logging.info("shut down pool.")
            self.sampler = None
class DataLoaderTest(DataLoaderTrain):
    """
    Streaming evaluation dataset.

    Unlike `DataLoaderTrain`, the candidate news carry explicit 0/1 labels in
    the behavior line, and `news_scoring` holds pre-computed news vectors, so
    `_process` emits float features rather than token ids. The prefetch and
    iteration machinery is inherited from `DataLoaderTrain`.
    """
    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_scoring,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        """Store configuration; no I/O happens until iteration starts."""
        self.data_dir = data_dir
        self.filename_pat = filename_pat
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size
        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx
        self.sampler = None
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        self.epoch = -1
        self.news_scoring = news_scoring
        self.news_index = news_index
        self.word_dict = word_dict
    def start(self):
        """Create this epoch's StreamSampler and prime its iterator (synchronous path)."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()
    def _produce(self):
        """Background-thread producer: stream batches, process, enqueue (prefetch path)."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
        except:
            # report the failure in the worker thread, stop the pool, re-raise
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise
    def _process(self, batch):
        """
        Turn raw behavior lines into an evaluation batch.

        Each line is tab-separated; field 3 is the click history and field 4
        holds the 'newsid-label' impression pairs.

        Returns
        -------
        tuple
            (user history features, history mask, candidate feature list,
            label-array list); the last two stay as Python lists because the
            number of candidates varies per impression.
        """
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
        for line in batch:
            click_docs = line[3].split()
            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_scoring[click_docs]
            impressions = line[4].split()  # parse the impression list once
            sample_news = self.trans_to_nindex(
                [i.split('-')[0] for i in impressions])
            labels = [int(i.split('-')[1]) for i in impressions]
            news_feature = self.news_scoring[sample_news]
            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(np.array(labels))
        if self.enable_gpu:
            user_feature_batch = torch.FloatTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
        else:
            user_feature_batch = torch.FloatTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)
        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch
| 11,565 | 35.71746 | 130 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/utils.py | import logging
import os
import sys
import torch
import numpy as np
import argparse
import re
from tnlrv3.modeling import TuringNLRv3ForSequenceClassification
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
from tnlrv3.tokenization_tnlrv3 import TuringNLRv3Tokenizer
from transformers import BertTokenizer, BertConfig, BertModel
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
# registry mapping a model-type key to its (config class, model class,
# tokenizer class) triple, following the transformers convention
MODEL_CLASSES = {
    'tnlrv3': (TuringNLRv3Config, TuringNLRv3ForSequenceClassification, TuringNLRv3Tokenizer),
    'bert': (BertConfig, BertModel, BertTokenizer),
    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)
}
def word_tokenize(sent):
    """
    Split a sentence into lower-cased word and punctuation tokens.

    Any non-string input yields an empty list.
    """
    if not isinstance(sent, str):
        return []
    token_pattern = re.compile(r'[\w]+|[.,!?;|]')
    return token_pattern.findall(sent.lower())
def str2bool(v):
    """
    Interpret a command-line flag value as a boolean.

    Real bools pass through unchanged; otherwise common textual spellings are
    matched case-insensitively. Raises argparse.ArgumentTypeError on anything
    unrecognized, so this can be used directly as an argparse `type`.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def init_hvd_cuda(enable_hvd=True, enable_gpu=True):
    """
    Optionally initialize horovod and pin this process to its local CUDA device.

    Parameters
    ----------
    enable_hvd : bool
        Import and initialize horovod for distributed training?
    enable_gpu : bool
        Bind the current process to the (horovod-local) CUDA device?

    Returns
    -------
    (int, int, int)
        (world size, global rank, local rank); (1, 0, 0) when horovod is
        disabled.
    """
    hvd = None
    if enable_hvd:
        # horovod is imported lazily so the module works without it installed
        import horovod.torch as hvd
        hvd.init()
        logging.info(
            f"hvd_size:{hvd.size()}, hvd_rank:{hvd.rank()}, hvd_local_rank:{hvd.local_rank()}"
        )
    hvd_size = hvd.size() if enable_hvd else 1
    hvd_rank = hvd.rank() if enable_hvd else 0
    hvd_local_rank = hvd.local_rank() if enable_hvd else 0
    if enable_gpu:
        torch.cuda.set_device(hvd_local_rank)
    return hvd_size, hvd_rank, hvd_local_rank
def setuplogger():
    """Configure the root logger to emit INFO-level records to stdout."""
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(
        logging.Formatter("[%(levelname)s %(asctime)s] %(message)s"))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.addHandler(stream_handler)
def dump_args(args):
    """Log every public attribute of the parsed-arguments namespace."""
    for attr_name in dir(args):
        if attr_name.startswith("_"):
            continue
        logging.info(f"args[{attr_name}]={getattr(args, attr_name)}")
def acc(y_true, y_hat):
    """Classification accuracy of the argmax predictions in `y_hat`."""
    predicted = torch.argmax(y_hat, dim=-1)
    total = y_true.shape[0]
    num_correct = torch.sum(y_true == predicted)
    return num_correct.data.float() * 1.0 / total
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain of the top-*k* items ranked by *y_score*."""
    top_k = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, top_k)
    gains = 2 ** relevance - 1
    # Position discounts: log2(rank + 1), ranks starting at 1.
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """Normalized DCG@k: DCG of the ranking divided by the ideal DCG."""
    ideal = dcg_score(y_true, y_true, k)
    achieved = dcg_score(y_true, y_score, k)
    return achieved / ideal
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of the positive labels under the score ranking."""
    ranked_labels = np.take(y_true, np.argsort(y_score)[::-1])
    positions = np.arange(1, len(ranked_labels) + 1)
    return np.sum(ranked_labels / positions) / np.sum(y_true)
def load_matrix(embedding_file_path, word_dict, word_embedding_dim):
    """Load pretrained word vectors for the vocabulary in *word_dict*.

    Args:
        embedding_file_path: path to a glove-style file (``word v1 v2 ...``
            per line), or None to return an all-zero matrix.
        word_dict: mapping word -> row index (indices start at 1; row 0 is
            reserved for padding).
        word_embedding_dim: dimensionality of each vector.

    Returns:
        (embedding_matrix, have_word): rows for words missing from the file
        stay zero; have_word lists the words that were found.
    """
    embedding_matrix = np.zeros(shape=(len(word_dict) + 1,
                                       word_embedding_dim))
    have_word = []
    if embedding_file_path is not None:
        with open(embedding_file_path, 'rb') as f:
            for line in f:
                parts = line.split()
                # Fix: the previous readline loop indexed parts[0] without
                # checking, so a blank/whitespace-only line raised IndexError.
                if not parts:
                    continue
                word = parts[0].decode()
                if word in word_dict:
                    index = word_dict[word]
                    embedding_matrix[index] = np.array(
                        [float(x) for x in parts[1:]])
                    have_word.append(word)
    return embedding_matrix, have_word
def latest_checkpoint(directory):
    """Return the checkpoint path with the highest epoch number, or None.

    Filenames are expected to look like ``epoch-3.pt``; the integer between
    the last '-' and the last '.' is used as the epoch key.
    """
    if not os.path.exists(directory):
        return None
    by_epoch = {}
    for fname in os.listdir(directory):
        epoch = int(fname.split('.')[-2].split('-')[-1])
        by_epoch[epoch] = fname
    if not by_epoch:
        return None
    return os.path.join(directory, by_epoch[max(by_epoch)])
def get_checkpoint(directory, ckpt_name):
    """Return the full path of *ckpt_name* under *directory* if it exists."""
    candidate = os.path.join(directory, ckpt_name)
    return candidate if os.path.exists(candidate) else None
| 4,173 | 27.589041 | 94 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/run.py | import numpy as np
import torch
import logging
from tqdm.auto import tqdm
import torch.optim as optim
import utils
import os
from pathlib import Path
import random
from dataloader import DataLoaderTrain, DataLoaderTest
from torch.utils.data import Dataset, DataLoader
from streaming import get_stat, get_worker_files
import pickle
from parameters import parse_args
from preprocess import read_news_bert, get_doc_input_bert
from model_bert import Model
def train(args):
    """Train the student ranker with multi-teacher knowledge distillation.

    Loads each teacher's user-encoder weights (frozen) plus its pre-computed
    news embeddings, optionally restores a pretrained student, freezes all
    BERT layers except those in ``args.bert_trainable_layer``, and runs
    Horovod-distributed training, checkpointing once per epoch on rank 0.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    # NOTE(review): ckpt_path computed here is recomputed below before use —
    # this first lookup appears redundant; confirm before removing.
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    # Per-file line counts, used to report how many samples this worker sees.
    stat = get_stat(args.train_data_dir, args.filename_pat)
    print(stat)
    data_paths = get_worker_files(args.train_data_dir,
                                  hvd_rank, hvd_size, args.filename_pat, args.enable_shuffle, 0
    )
    sample_num = 0
    for file in data_paths:
        sample_num += stat[file]
    logging.info("[{}] contains {} samples {} steps".format(
        hvd_rank, sample_num, sample_num // args.batch_size))
    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # Token ids and the attention mask are packed side by side in one array.
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)
    model = Model(args)
    teacher_embs = []
    model_dict = model.state_dict()
    loaded_key = []
    remain_key = list(model_dict.keys())
    # Copy each teacher's user-encoder weights into model.teachers[i] and
    # load its pre-computed news embeddings from disk.
    for i, (teacher_ckpt, teacher_emb) in enumerate(zip(args.teacher_ckpts, args.teacher_emb_paths)):
        ckpt = torch.load(teacher_ckpt, map_location='cpu')
        teacher_dict = ckpt["model_state_dict"]
        for k, v in teacher_dict.items():
            if not k.startswith('user_encoder'):
                continue
            # 'user_encoder.xxx' -> 'teachers.<i>.xxx'
            key = '.'.join(['teachers', str(i)] + k.split('.')[1:])
            model_dict[key].copy_(v)
            loaded_key.append(key)
            remain_key.remove(key)
        del ckpt
        with open(teacher_emb, 'rb') as f:
            teacher_embs.append(pickle.load(f))
    if args.use_pretrain_model:
        ckpt = torch.load(args.pretrain_model_path, map_location='cpu')
        pretrained_dict = ckpt["model_state_dict"]
        for k, v in pretrained_dict.items():
            if not k.startswith('student'):
                continue
            # key = 'student.' + k
            model_dict[k].copy_(v)
            loaded_key.append(k)
            remain_key.remove(k)
    # NOTE(review): updating model_dict with itself is a no-op — confirm the
    # intent (the in-place copy_ calls above already mutated the tensors).
    model_dict.update(model_dict)
    model.load_state_dict(model_dict)
    if hvd_rank == 0:
        logging.info(f"loaded teacher models: {args.teacher_ckpts}")
        print(f'{len(loaded_key)} loaded parameters:')
        for k in loaded_key:
            print(f'\t{k}')
        print(f'{len(remain_key)} initialized parameters:')
        for k in remain_key:
            print(f'\t{k}')
    torch.cuda.empty_cache()
    # Teachers are frozen; only the student is trained.
    for param in model.teachers.parameters():
        param.requires_grad = False
    # Freeze the student's BERT except the layers in bert_trainable_layer.
    if args.model_type == 'tnlrv3':
        for param in model.student.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.student.news_encoder.bert_model.bert.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True
    else:
        for param in model.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.news_encoder.bert_model.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True
    word_dict = None
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
        checkpoint = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_gpu:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, amsgrad=True)
    if hvd_rank == 0:
        print(model)
        for name, param in model.named_parameters():
            print(name, param.requires_grad)
    if args.enable_hvd:
        # Synchronize initial state across workers, then wrap the optimizer
        # so gradients are averaged over the ring.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        compression = hvd.Compression.none
        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression,
            op=hvd.Average)
    dataloader = DataLoaderTrain(
        teacher_embs=teacher_embs,
        news_index=news_index,
        news_combined=news_combined,
        word_dict=word_dict,
        data_dir=args.train_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_gpu=args.enable_gpu,
    )
    if args.tensorboard is not None:
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir=f'{args.tensorboard}/worker_{hvd_rank}')
    logging.info('Training...')
    g_step = 0
    for ep in range(args.start_epoch, args.epochs):
        LOSS, ACC = 0.0, 0.0
        for cnt, (log_ids, log_mask, input_ids, targets, teacher_history, teacher_candidate) in enumerate(dataloader):
            if cnt > args.max_steps_per_epoch:
                break
            total_loss, distill_loss, emb_loss, target_loss, y_student = model(
                log_ids, log_mask, input_ids, targets, teacher_history, teacher_candidate)
            accuracy = utils.acc(targets, y_student)
            LOSS += total_loss
            ACC += accuracy
            if args.tensorboard:
                writer.add_scalar("total_loss", total_loss, g_step)
                writer.add_scalar("emb_loss", emb_loss, g_step)
                writer.add_scalar("distill_loss", distill_loss, g_step)
                writer.add_scalar("target_loss", target_loss, g_step)
                writer.add_scalar("acc", accuracy, g_step)
                writer.add_scalar("W", model.W, g_step)
            g_step += 1
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            # NOTE(review): at cnt == 0 this branch divides by zero; with
            # tensors that yields inf rather than a crash — confirm intended.
            if cnt % args.log_steps == 0:
                logging.info(
                    '[{}] Ed: {}, train_loss: {:.5f}, acc: {:.5f}'.format(
                        hvd_rank, cnt * args.batch_size, LOSS.data / cnt, ACC / cnt))
        print(ep + 1)
        # save model last of epoch
        if hvd_rank == 0:
            ckpt_path = os.path.join(args.model_dir, f'epoch-{ep+1}.pt')
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'category_dict': category_dict,
                    'word_dict': word_dict,
                    'subcategory_dict': subcategory_dict
                }, ckpt_path)
            logging.info(f"Model saved to {ckpt_path}")
    dataloader.join()
def test(args):
    """Evaluate the latest (or named) checkpoint on the test split.

    Encodes all news once, reports an average pairwise doc-similarity
    diagnostic, then scores impressions and aggregates AUC/MRR/nDCG@5/
    nDCG@10 across Horovod workers.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    assert ckpt_path is not None, 'No ckpt found'
    checkpoint = torch.load(ckpt_path)
    # Vocabulary dictionaries were saved alongside the weights at train time.
    subcategory_dict = checkpoint['subcategory_dict']
    category_dict = checkpoint['category_dict']
    word_dict = checkpoint['word_dict']
    model = Model(args)
    if args.enable_gpu:
        model.cuda()
    model.load_state_dict(checkpoint['model_state_dict'])
    logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_hvd:
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    model.eval()
    torch.set_grad_enabled(False)
    news, news_index = read_news_bert(
        os.path.join(args.test_data_dir, 'news.tsv'), args, mode='test'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    news_combined = np.concatenate([news_title, news_title_attmask], axis=1)
    # Minimal dataset over the packed (ids + attention-mask) matrix.
    class NewsDataset(Dataset):
        def __init__(self, data):
            self.data = data
        def __getitem__(self, idx):
            return self.data[idx]
        def __len__(self):
            return self.data.shape[0]
    def news_collate_fn(arr):
        arr = torch.LongTensor(arr)
        return arr
    news_dataset = NewsDataset(news_combined)
    news_dataloader = DataLoader(news_dataset,
                                 batch_size=args.batch_size * 4,
                                 num_workers=args.num_workers,
                                 collate_fn=news_collate_fn)
    news_scoring = []
    with torch.no_grad():
        for input_ids in tqdm(news_dataloader):
            # NOTE(review): .cuda() is unconditional here even when
            # args.enable_gpu is False — confirm GPU is required for testing.
            input_ids = input_ids.cuda()
            news_vec = model.student.news_encoder(input_ids)
            news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
            news_scoring.extend(news_vec)
    news_scoring = np.array(news_scoring)
    logging.info("news scoring num: {}".format(news_scoring.shape[0]))
    # Diagnostic: mean cosine similarity of 1M random news pairs.
    doc_sim = 0
    for _ in tqdm(range(1000000)):
        i = random.randrange(1, len(news_scoring))
        j = random.randrange(1, len(news_scoring))
        if i != j:
            doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
    print(f'=== doc-sim: {doc_sim / 1000000} ===')
    dataloader = DataLoaderTest(
        news_index=news_index,
        news_scoring=news_scoring,
        word_dict=word_dict,
        data_dir=args.test_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=False,
        enable_gpu=args.enable_gpu,
    )
    from metrics import roc_auc_score, ndcg_score, mrr_score
    AUC = []
    MRR = []
    nDCG5 = []
    nDCG10 = []
    # NOTE: the parameter name shadows the outer hvd_local_rank on purpose —
    # callers pass the rank they want printed.
    def print_metrics(hvd_local_rank, cnt, x):
        logging.info("[{}] Ed: {}: {}".format(hvd_local_rank, cnt,
                                              '\t'.join(["{:0.2f}".format(i * 100) for i in x])))
    def get_mean(arr):
        return [np.array(i).mean() for i in arr]
    def get_sum(arr):
        return [np.array(i).sum() for i in arr]
    local_sample_num = 0
    for cnt, (log_vecs, log_mask, news_vecs, labels) in enumerate(dataloader):
        local_sample_num += log_vecs.shape[0]
        if args.enable_gpu:
            log_vecs = log_vecs.cuda(non_blocking=True)
            log_mask = log_mask.cuda(non_blocking=True)
        user_vecs = model.student.user_encoder(log_vecs, log_mask).to(
            torch.device("cpu")).detach().numpy()
        for user_vec, news_vec, label in zip(user_vecs, news_vecs, labels):
            # Skip impressions with all-positive or all-negative labels:
            # AUC/nDCG are undefined for them.
            if label.mean() == 0 or label.mean() == 1:
                continue
            score = np.dot(news_vec, user_vec)
            auc = roc_auc_score(label, score)
            mrr = mrr_score(label, score)
            ndcg5 = ndcg_score(label, score, k=5)
            ndcg10 = ndcg_score(label, score, k=10)
            AUC.append(auc)
            MRR.append(mrr)
            nDCG5.append(ndcg5)
            nDCG10.append(ndcg10)
        if cnt % args.log_steps == 0:
            print_metrics(hvd_rank, local_sample_num,
                          get_mean([AUC, MRR, nDCG5, nDCG10]))
    # stop scoring
    dataloader.join()
    logging.info('[{}] local_sample_num: {}'.format(
        hvd_rank, local_sample_num))
    # Aggregate sums (not means) across workers, then normalize once.
    total_sample_num = hvd.allreduce(
        torch.tensor(local_sample_num), op=hvd.Sum)
    local_metrics_sum = get_sum([AUC, MRR, nDCG5, nDCG10])
    total_metrics_sum = hvd.allreduce(torch.tensor(
        local_metrics_sum, dtype=float), op=hvd.Sum)
    if hvd_rank == 0:
        print_metrics(hvd_rank, total_sample_num,
                      total_metrics_sum / total_sample_num)
def get_teacher_emb(args):
    """Pre-compute and pickle each teacher's news embeddings.

    For every (checkpoint, output-path) pair in args.teacher_ckpts /
    args.teacher_emb_paths: load the teacher, encode every training news
    article, print a doc-similarity diagnostic, and pickle the embedding
    matrix to the output path for later distillation.
    """
    from model_bert_2 import ModelBert
    import pickle
    if args.enable_hvd:
        import horovod.torch as hvd
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # Token ids and attention mask are packed side by side in one array.
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)
    class NewsDataset(Dataset):
        def __init__(self, data):
            self.data = data
        def __getitem__(self, idx):
            return self.data[idx]
        def __len__(self):
            return self.data.shape[0]
    def news_collate_fn(arr):
        arr = torch.LongTensor(arr)
        return arr
    for ckpt_path, teacher_emb in zip(args.teacher_ckpts, args.teacher_emb_paths):
        model = ModelBert(args)
        ckpt = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(ckpt['model_state_dict'])
        logging.info(f"loaded teacher model: {ckpt_path}")
        del ckpt
        torch.cuda.empty_cache()
        model = model.cuda()
        model.eval()
        torch.set_grad_enabled(False)
        news_dataset = NewsDataset(news_combined)
        news_dataloader = DataLoader(news_dataset,
                                     batch_size=args.batch_size * 4,
                                     num_workers=args.num_workers,
                                     collate_fn=news_collate_fn)
        news_scoring = []
        with torch.no_grad():
            for input_ids in tqdm(news_dataloader):
                input_ids = input_ids.cuda()
                news_vec = model.news_encoder(input_ids)
                news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
                news_scoring.extend(news_vec)
        news_scoring = np.array(news_scoring)
        logging.info("news scoring num: {}".format(news_scoring.shape[0]))
        # Diagnostic: mean cosine similarity over 1M random news pairs.
        doc_sim = 0
        for _ in tqdm(range(1000000)):
            i = random.randrange(1, len(news_scoring))
            j = random.randrange(1, len(news_scoring))
            if i != j:
                doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                    np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
        print(f'=== doc-sim: {doc_sim / 1000000} ===')
        with open(teacher_emb, 'wb') as f:
            pickle.dump(news_scoring, f)
        logging.info(f"teacher embedding saved at {teacher_emb}")
if __name__ == "__main__":
utils.setuplogger()
args = parse_args()
Path(args.model_dir).mkdir(parents=True, exist_ok=True)
if 'train' in args.mode:
train(args)
if 'test' in args.mode:
test(args)
if 'get_teacher_emb' in args.mode:
get_teacher_emb(args)
| 15,933 | 32.687104 | 118 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/streaming.py | import os
import logging
import fnmatch
import random
import numpy as np
import tensorflow as tf
import subprocess
def get_stat(dirname, filename_pat="*"):
if not tf.io.gfile.exists(dirname):
logging.warning(f"{dirname} does not exist!")
return None
stat = {}
for x in tf.io.gfile.listdir(dirname):
if fnmatch.fnmatch(x, filename_pat):
file = os.path.join(dirname, x)
result = subprocess.getoutput(f'wc -l {file}')
size = int(result.split(' ')[0])
stat[file] = size
return stat
def get_files(dirname, filename_pat="*", recursive=False):
if not tf.io.gfile.exists(dirname):
logging.warning(f"{dirname} does not exist!")
return None
files = []
for x in tf.io.gfile.listdir(dirname):
path = os.path.join(dirname, x)
if tf.io.gfile.isdir(path):
if recursive:
files.extend(get_files(path, filename_pat))
elif fnmatch.fnmatch(x, filename_pat):
files.append(path)
return files
def get_worker_files(dirname,
                     worker_rank,
                     world_size,
                     filename_pat="*",
                     shuffle=False,
                     seed=0):
    """Return the file paths assigned to one worker.

    Files are sorted (then optionally shuffled with a fixed *seed*) and
    sharded round-robin: worker r takes indices r, r+world_size, ...
    """
    all_files = get_files(dirname, filename_pat)
    all_files.sort()
    if shuffle:
        random.seed(seed)
        random.shuffle(all_files)
    files = all_files[worker_rank::world_size]
    logging.info(
        f"worker_rank:{worker_rank}, world_size:{world_size}, shuffle:{shuffle}, seed:{seed}, directory:{dirname}, files:{files}"
    )
    return files
class StreamReader:
    # Wraps a TF1-style one-shot iterator over text files, exposing a
    # synchronous get_next() that flags end-of-stream instead of raising.
    def __init__(self, data_paths, batch_size, shuffle=False, shuffle_buffer_size=1000):
        # Keep TensorFlow off the GPUs: this pipeline is CPU-only I/O.
        tf.config.experimental.set_visible_devices([], device_type="GPU")
        path_len = len(data_paths)
        # Interleave lines from all files for cross-file mixing.
        dataset = tf.data.Dataset.list_files(data_paths).interleave(
            lambda x: tf.data.TextLineDataset(x),
            cycle_length=path_len,
            block_length=128,
            num_parallel_calls=min(path_len, 64),
        )
        if shuffle:
            dataset = dataset.shuffle(
                shuffle_buffer_size, reshuffle_each_iteration=True)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(1)
        # NOTE: TF1 graph-mode API (make_one_shot_iterator / tf.Session);
        # requires tf.compat.v1 behavior under TF2.
        self.next_batch = dataset.make_one_shot_iterator().get_next()
        self.session = None
    def reset(self):
        # Start a fresh session for a new pass over the data.
        if self.session:
            self.session.close()
        self.session = tf.Session()
        self.endofstream = False
    def get_next(self):
        # Returns the next batch, or None once the stream is exhausted.
        try:
            ret = self.session.run(self.next_batch)
        except tf.errors.OutOfRangeError:
            self.endofstream = True
            return None
        return ret
    def reach_end(self):
        return self.endofstream
class StreamSampler:
    """Iterable over batches of raw text lines for one distributed worker."""
    def __init__(
            self,
            data_dir,
            filename_pat,
            batch_size,
            worker_rank,
            world_size,
            enable_shuffle=False,
            shuffle_buffer_size=1000,
            shuffle_seed=0,
    ):
        worker_paths = get_worker_files(
            data_dir,
            worker_rank,
            world_size,
            filename_pat,
            shuffle=enable_shuffle,
            seed=shuffle_seed,
        )
        self.stream_reader = StreamReader(
            worker_paths,
            batch_size,
            enable_shuffle,
            shuffle_buffer_size
        )
    def __iter__(self):
        # Each new iteration restarts the underlying TF session.
        self.stream_reader.reset()
        return self
    def __next__(self):
        """Implement iterator interface."""
        batch = self.stream_reader.get_next()
        if not (isinstance(batch, np.ndarray) or isinstance(batch, tuple)):
            raise StopIteration
        return batch
    def reach_end(self):
        return self.stream_reader.reach_end()
| 3,990 | 27.507143 | 129 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/parameters.py | import argparse
import utils
import logging
def parse_args():
    """Define and parse all command-line arguments for training/eval.

    Returns the parsed argparse.Namespace after logging it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode",
                        type=str,
                        default="train",
                        choices=['train', 'test', 'get_teacher_emb'])
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default="../MIND/MINDlarge_train",
    )
    parser.add_argument(
        "--test_data_dir",
        type=str,
        default="../MIND/MINDlarge_test",
    )
    parser.add_argument("--filename_pat", type=str, default="behaviors_np4_*.tsv")
    parser.add_argument("--model_dir", type=str, default='./model')
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--npratio", type=int, default=4)
    parser.add_argument("--enable_gpu", type=utils.str2bool, default=True)
    parser.add_argument("--enable_hvd", type=utils.str2bool, default=True)
    parser.add_argument("--enable_shuffle", type=utils.str2bool, default=True)
    parser.add_argument("--shuffle_buffer_size", type=int, default=10000)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--filter_num", type=int, default=3)
    parser.add_argument("--log_steps", type=int, default=100)
    # model training
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--lr", type=float, default=0.0001)
    parser.add_argument("--num_words_title", type=int, default=20)
    parser.add_argument("--num_words_abstract", type=int, default=50)
    parser.add_argument("--num_words_body", type=int, default=100)
    parser.add_argument(
        "--user_log_length",
        type=int,
        default=50,
    )
    parser.add_argument(
        "--word_embedding_dim",
        type=int,
        default=300,
    )
    parser.add_argument(
        "--glove_embedding_path",
        type=str,
        default='./glove.840B.300d.txt',
    )
    parser.add_argument("--freeze_embedding", type=utils.str2bool, default=False)
    parser.add_argument(
        "--news_dim",
        type=int,
        default=64,
    )
    parser.add_argument(
        "--news_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--user_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--num_attention_heads",
        type=int,
        default=20,
    )
    parser.add_argument("--user_log_mask", type=utils.str2bool, default=True)
    parser.add_argument("--drop_rate", type=float, default=0.2)
    parser.add_argument("--save_steps", type=int, default=1000)
    parser.add_argument("--max_steps_per_epoch", type=int, default=1000000)
    parser.add_argument("--load_ckpt_name",
                        type=str,
                        default=None,
                        help="choose which ckpt to load and test")
    # bert
    parser.add_argument("--apply_bert", type=utils.str2bool, default=False)
    parser.add_argument("--model_type", default="bert", type=str)
    parser.add_argument("--do_lower_case", type=utils.str2bool, default=True)
    parser.add_argument("--model_name", default="../bert-base-uncased/pytorch_model.bin", type=str)
    parser.add_argument("--config_name", default="../bert-base-uncased/config.json", type=str)
    parser.add_argument("--tokenizer_name", default="../bert-base-uncased/vocab.txt", type=str)
    parser.add_argument("--num_hidden_layers", type=int, default=8)
    # Indices of the BERT encoder layers left trainable (rest are frozen).
    parser.add_argument("--bert_trainable_layer",
                        type=int,
                        nargs='+',
                        default=[],
                        choices=list(range(12)))
    parser.add_argument("--model", type=str, default=None)
    parser.add_argument("--pooling", type=str, default='att')
    parser.add_argument("--start_epoch", type=int, default=0)
    parser.add_argument("--use_pretrain_model", type=utils.str2bool, default=False)
    parser.add_argument("--pretrain_model_path", type=str, default=None)
    parser.add_argument("--pretrain_lr", type=float, default=0.00001)
    # distillation
    parser.add_argument("--num_teacher_layers", type=int, default=12)
    parser.add_argument("--num_student_layers", type=int, default=4)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--coef", type=float, default=1.0)
    parser.add_argument("--tensorboard", type=str, default=None)
    parser.add_argument("--teacher_ckpts", type=str, nargs='+', default=[])
    parser.add_argument("--teacher_emb_paths", type=str, nargs='+', default=[])
    parser.add_argument("--num_teachers", type=int, default=4)
    args = parser.parse_args()
    logging.info(args)
    return args
if __name__ == "__main__":
args = parse_args()
| 4,778 | 37.232 | 99 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/model_bert_2.py | import numpy as np
import torch
from torch import nn
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
    """Additive-attention pooling: collapse a sequence into one vector."""
    def __init__(self, emb_size, hidden_size):
        super(AttentionPooling, self).__init__()
        self.att_fc1 = nn.Linear(emb_size, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)
    def forward(self, x, attn_mask=None):
        """Pool x (batch, seq, emb) into (batch, emb).

        attn_mask (batch, seq), if given, zeroes out padded positions
        before the exp-normalization.
        """
        scores = self.att_fc2(nn.Tanh()(self.att_fc1(x)))
        weights = torch.exp(scores)
        if attn_mask is not None:
            weights = weights * attn_mask.unsqueeze(2)
        weights = weights / (torch.sum(weights, dim=1, keepdim=True) + 1e-8)
        return torch.bmm(x.permute(0, 2, 1), weights).squeeze(dim=-1)
class ScaledDotProductAttention(nn.Module):
    """Exp-normalized scaled dot-product attention with optional masking."""
    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k
    def forward(self, Q, K, V, attn_mask=None):
        """Q/K: (batch, n_head, num, d_k); V: (batch, n_head, num, d_v).

        attn_mask (batch, n_head, num) zeroes out masked keys.
        Returns (batch, n_head, num, d_v).
        """
        logits = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        weights = torch.exp(logits)
        if attn_mask is not None:
            weights = weights * attn_mask.unsqueeze(dim=-2)
        weights = weights / (torch.sum(weights, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(weights, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention built on ScaledDotProductAttention."""
    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()
    def _initialize_weights(self):
        # Xavier-uniform init for every projection layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight, gain=1)
    def forward(self, Q, K, V, mask=None):
        """Q/K/V: (batch, num, d_model); mask: (batch, num).

        Returns (batch, num, n_heads * d_v).
        """
        bz = Q.shape[0]
        if mask is not None:
            # Broadcast the key mask over all heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)
        # Project, then split into heads: (batch, n_heads, num, d_*).
        q_heads = self.W_Q(Q).view(bz, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_heads = self.W_K(K).view(bz, -1, self.n_heads, self.d_k).transpose(1, 2)
        v_heads = self.W_V(V).view(bz, -1, self.n_heads, self.d_v).transpose(1, 2)
        attended = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, mask)
        return attended.transpose(1, 2).contiguous().view(
            bz, -1, self.n_heads * self.d_v)
class NewsEncoder(nn.Module):
    # Encodes a packed (token ids + attention mask) row into a dense news
    # vector using a pretrained transformer plus a pooling head.
    def __init__(self, args):
        super(NewsEncoder, self).__init__()
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # Index of the hidden-states entry in the model's output tuple differs
        # between the tnlrv3 and HF bert/roberta classes.
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        self.bert_config = config_class.from_pretrained(
            args.config_name,
            output_hidden_states=True,
            num_hidden_layers=args.num_hidden_layers)
        self.bert_model = model_class.from_pretrained(
            args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(
                self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)
    def forward(self, x):
        '''
        x: batch_size, word_num * 2
        mask: batch_size, word_num
        '''
        batch_size, num_words = x.shape
        # The first half of each row holds token ids, the second half the
        # attention mask.
        num_words = num_words // 2
        text_ids = torch.narrow(x, 1, 0, num_words)
        text_attmask = torch.narrow(x, 1, num_words, num_words)
        # Take the hidden states of the last transformer layer.
        word_vecs = self.bert_model(text_ids, text_attmask)[
            self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            news_vec = torch.narrow(word_vecs, 1, 0, 1).squeeze(dim=1)
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        news_vec = self.dense(news_vec)
        return news_vec
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into a single user vector."""
    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(
                args.news_dim, args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(
                args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(
                args.news_dim, args.user_query_vector_dim)
        # Learned embedding substituted for padded history slots.
        self.pad_doc = nn.Parameter(torch.empty(
            1, args.news_dim).uniform_(-1, 1)).type(torch.FloatTensor)
    def forward(self, news_vecs, log_mask=None):
        """news_vecs: (batch, history, news_dim); log_mask: (batch, history)."""
        batch = news_vecs.shape[0]
        if not self.args.user_log_mask:
            # Replace masked-out history slots with the learned pad vector,
            # then attend without a mask.
            pad = self.pad_doc.unsqueeze(dim=0).expand(
                batch, self.args.user_log_length, -1)
            mask = log_mask.unsqueeze(dim=-1)
            news_vecs = news_vecs * mask + pad * (1 - mask)
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(
                    news_vecs, news_vecs, news_vecs)
            return self.attn(news_vecs)
        if self.args.model == 'NRMS':
            news_vecs = self.multi_head_self_attn(
                news_vecs, news_vecs, news_vecs, log_mask)
        return self.attn(news_vecs, log_mask)
class ModelBert(torch.nn.Module):
    """News recommender: score candidate news against an encoded user."""
    def __init__(self, args):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args)
        self.user_encoder = UserEncoder(args)
        self.loss_fn = nn.CrossEntropyLoss()
    def forward(self, history, history_mask, candidate, label):
        """Score candidates and return (cross-entropy loss, raw scores).

        history: (batch, history_length, num_word_title * 2)
        history_mask: (batch, history_length)
        candidate: (batch, 1+K, num_word_title * 2)
        label: (batch, 1+K)
        """
        bz = history.shape[0]
        token_dim = history.shape[-1]
        # Encode candidates first, then the click history (same order as the
        # original implementation).
        cand_vecs = self.news_encoder(
            candidate.reshape(-1, token_dim)).reshape(bz, -1, self.args.news_dim)
        hist_vecs = self.news_encoder(
            history.reshape(-1, token_dim)).reshape(
                -1, self.args.user_log_length, self.args.news_dim)
        user_vec = self.user_encoder(hist_vecs, history_mask)
        score = torch.bmm(cand_vecs,
                          user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        return self.loss_fn(score, label), score
| 8,043 | 36.588785 | 84 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/metrics.py | from sklearn.metrics import roc_auc_score
import numpy as np
def dcg_score(y_true, y_score, k=10):
    """DCG@k of *y_true* relevance under the ranking induced by *y_score*."""
    ranking = np.argsort(y_score)[::-1]
    rel = np.take(y_true, ranking[:k])
    return np.sum((2 ** rel - 1) / np.log2(np.arange(len(rel)) + 2))
def ndcg_score(y_true, y_score, k=10):
    """Normalized DCG@k (actual DCG over the ideal-ranking DCG)."""
    ideal = dcg_score(y_true, y_true, k)
    return dcg_score(y_true, y_score, k) / ideal
def mrr_score(y_true, y_score):
    """Average reciprocal rank over the positive labels."""
    ranked = np.take(y_true, np.argsort(y_score)[::-1])
    reciprocal = ranked / np.arange(1, len(ranked) + 1)
    return np.sum(reciprocal) / np.sum(y_true)
def ctr_score(y_true, y_score, k=1):
    """Mean label value among the top-*k* items ranked by *y_score*."""
    top_k = np.argsort(y_score)[::-1][:k]
    return np.mean(np.take(y_true, top_k))
| 793 | 25.466667 | 52 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/model_bert.py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
    """Additive attention pooling of a token/news sequence to one vector."""
    def __init__(self, emb_size, hidden_size):
        super(AttentionPooling, self).__init__()
        self.att_fc1 = nn.Linear(emb_size, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)
    def forward(self, x, attn_mask=None):
        """x: (batch, seq, emb); attn_mask: (batch, seq) -> (batch, emb)."""
        hidden = torch.tanh(self.att_fc1(x))
        alpha = torch.exp(self.att_fc2(hidden))
        if attn_mask is not None:
            # Zero out padded positions before normalizing.
            alpha = alpha * attn_mask.unsqueeze(2)
        alpha = alpha / (torch.sum(alpha, dim=1, keepdim=True) + 1e-8)
        return torch.bmm(x.permute(0, 2, 1), alpha).squeeze(dim=-1)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with exp-normalization and masking."""
    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k
    def forward(self, Q, K, V, attn_mask=None):
        """Q/K: (batch, n_head, num, d_k); V: (batch, n_head, num, d_v).

        attn_mask (batch, n_head, num) zeroes out masked keys.
        Returns (batch, n_head, num, d_v).
        """
        raw = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        exp_scores = torch.exp(raw)
        if attn_mask is not None:
            exp_scores = exp_scores * attn_mask.unsqueeze(dim=-2)
        probs = exp_scores / (torch.sum(exp_scores, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(probs, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention layered on ScaledDotProductAttention."""
    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()
    def _initialize_weights(self):
        # Xavier-uniform init for all projection matrices.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight, gain=1)
    def forward(self, Q, K, V, mask=None):
        """Q/K/V: (batch, num, d_model); mask: (batch, num).

        Returns (batch, num, n_heads * d_v).
        """
        bz = Q.shape[0]
        if mask is not None:
            # Broadcast the key mask across heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)
        q_heads = self.W_Q(Q).view(bz, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_heads = self.W_K(K).view(bz, -1, self.n_heads, self.d_k).transpose(1, 2)
        v_heads = self.W_V(V).view(bz, -1, self.n_heads, self.d_v).transpose(1, 2)
        attended = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, mask)
        return attended.transpose(1, 2).contiguous().view(
            bz, -1, self.n_heads * self.d_v)
class NewsEncoder(nn.Module):
    # Encodes a packed (token ids + attention mask) row into a dense news
    # vector; is_teacher selects the deeper teacher depth vs. student depth.
    def __init__(self, args, is_teacher):
        super(NewsEncoder, self).__init__()
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # Index of the hidden-states entry in the output tuple differs
        # between the tnlrv3 and HF bert/roberta classes.
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        self.bert_config = config_class.from_pretrained(
            args.config_name,
            output_hidden_states=True,
            num_hidden_layers=args.num_teacher_layers if is_teacher else args.num_student_layers)
        self.bert_model = model_class.from_pretrained(args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)
    def forward(self, x):
        '''
        x: batch_size, word_num * 2
        mask: batch_size, word_num
        '''
        batch_size, num_words = x.shape
        # First half of each row holds token ids, second half the mask.
        num_words = num_words // 2
        text_ids = torch.narrow(x, 1, 0, num_words)
        text_attmask = torch.narrow(x, 1, num_words, num_words)
        # Take the hidden states of the last transformer layer.
        word_vecs = self.bert_model(
            text_ids, text_attmask)[self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            news_vec = torch.narrow(word_vecs, 1, 0, 1).squeeze(dim=1)
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        news_vec = self.dense(news_vec)
        return news_vec
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into a single user vector.

    For the NRMS model, multi-head self-attention is applied over the
    history before attention pooling; otherwise attention pooling is
    applied directly.
    """

    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(args.news_dim,
                                                               args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(args.news_dim, args.user_query_vector_dim)
        # Learnable embedding used to fill empty history slots.
        # FIX: the cast must happen *inside* nn.Parameter. Applying
        # .type(torch.FloatTensor) to the Parameter itself returns a plain
        # Tensor whenever a cast copy is made (e.g. under a non-float32
        # default dtype), silently un-registering pad_doc from the module's
        # parameters (no training, no .to(device) handling).
        self.pad_doc = nn.Parameter(
            torch.empty(1, args.news_dim).uniform_(-1, 1).type(torch.FloatTensor))

    def forward(self, news_vecs, log_mask=None):
        '''
        news_vecs: batch_size, history_num, news_dim
        log_mask: batch_size, history_num
        Returns: batch_size, user_vector_dim
        '''
        bz = news_vecs.shape[0]
        if self.args.user_log_mask:
            # Mask padded history positions inside the attention layers.
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs, log_mask)
            user_vec = self.attn(news_vecs, log_mask)
        else:
            # Replace padded positions with the learnable padding vector.
            padding_doc = self.pad_doc.unsqueeze(dim=0).expand(bz, self.args.user_log_length, -1)
            news_vecs = news_vecs * log_mask.unsqueeze(
                dim=-1) + padding_doc * (1 - log_mask.unsqueeze(dim=-1))
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs)
            user_vec = self.attn(news_vecs)
        return user_vec
class ModelBert(torch.nn.Module):
    """End-to-end recommendation model: encodes candidate and history news,
    builds a user vector, and scores candidates by dot product."""

    def __init__(self, args, is_teacher):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args, is_teacher)
        self.user_encoder = UserEncoder(args)

    def forward(self, history, history_mask, candidate):
        '''
        history: batch_size, history_length, num_word_title * 2
        history_mask: batch_size, history_length
        candidate: batch_size, 1+K, num_word_title * 2
        Returns: (scores, history news vecs, candidate news vecs, user vec)
        '''
        batch_size = history.shape[0]
        input_id_num = history.shape[-1]

        # Encode all candidate titles in one flat batch, then restore shape.
        candidate_news_vecs = self.news_encoder(candidate.reshape(-1, input_id_num))
        candidate_news_vecs = candidate_news_vecs.reshape(batch_size, -1, self.args.news_dim)

        # Encode the clicked-news history the same way.
        history_news_vecs = self.news_encoder(history.reshape(-1, input_id_num))
        history_news_vecs = history_news_vecs.reshape(-1, self.args.user_log_length,
                                                      self.args.news_dim)

        user_vec = self.user_encoder(history_news_vecs, history_mask)
        # Dot-product score between every candidate and the user vector.
        score = torch.bmm(candidate_news_vecs, user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        return score, history_news_vecs, candidate_news_vecs, user_vec
def kd_ce_loss(logits_S, logits_T, temperature=1):
    '''
    Soft cross entropy between student and teacher logits for knowledge
    distillation (teacher probabilities against student log-probabilities).

    :param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
    '''
    teacher_probs = F.softmax(logits_T / temperature, dim=-1)
    student_log_probs = F.log_softmax(logits_S / temperature, dim=-1)
    # H(teacher, student), averaged over all leading dimensions.
    return -(teacher_probs * student_log_probs).sum(dim=-1).mean()
def hid_mse_loss(state_S, state_T, mask=None, reduce=True):
    '''
    MSE between student and teacher hidden states, optionally masked.

    * If `mask` is given, positions where ``mask == 0`` contribute zero.
    * If `reduce` is False, a per-position loss of shape
      (*batch_size*, *length*) is returned; otherwise a scalar.

    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
    '''
    if mask is None:
        if reduce:
            return F.mse_loss(state_S, state_T)
        return F.mse_loss(state_S, state_T, reduction='none').mean(dim=-1)

    masked = F.mse_loss(state_S, state_T, reduction='none') * mask.unsqueeze(-1)
    if not reduce:
        return masked.mean(dim=-1)
    # Normalize by the number of unmasked scalar entries.
    return masked.sum() / (mask.sum() * state_S.size(-1))
class Model(torch.nn.Module):
    # Multi-teacher knowledge-distillation wrapper: a BERT-based student
    # (ModelBert) is trained against several teachers whose news embeddings
    # arrive pre-computed; per-sample teacher weights are derived from each
    # teacher's own cross-entropy loss (better teacher -> larger weight).
    def __init__(self, args):
        super(Model, self).__init__()
        self.args = args
        # One trainable user encoder per teacher; teacher news encoders are
        # unnecessary because teacher news embeddings are inputs to forward().
        self.teachers = nn.ModuleList([UserEncoder(args) for _ in range(args.num_teachers)])
        self.student = ModelBert(args, is_teacher=False)
        self.target_loss_fn = nn.CrossEntropyLoss()
        # Per-teacher linear maps projecting teacher embeddings into the
        # student's embedding space before the MSE alignment losses.
        self.transform_matrix = nn.ModuleList(
            [nn.Linear(args.news_dim, args.news_dim) for _ in range(args.num_teachers)])
        for module in self.transform_matrix:
            nn.init.xavier_uniform_(module.weight, gain=1.)
            nn.init.constant_(module.bias, 0.0)

    def forward(self, history, history_mask, candidate, label, teacher_history_embs,
                teacher_candidate_embs):
        '''
        Returns (total_loss, distill_loss, emb_loss, target_loss, student_score).

        teacher_history_embs: [(batch_size, user_log_length, news_emb) * num_teachers]
        teacher_candidate_embs: [(batch_size, 1+K, news_emb) * num_teachers]
        '''
        student_score, student_history_emb, student_candidate_emb, student_user_emb = self.student(
            history, history_mask, candidate)
        # All student news vectors (history + candidates) for alignment.
        student_news_emb = torch.cat([student_history_emb, student_candidate_emb], dim=1)
        # Hard-label loss on the student's own scores.
        target_loss = self.target_loss_fn(student_score, label)

        teacher_scores, teacher_losses = [], []
        NE_MSEs, UE_MSEs = [], []  # news-embedding / user-embedding alignment terms
        for i, (teacher_history,
                teacher_candidate) in enumerate(zip(teacher_history_embs, teacher_candidate_embs)):
            teacher_news_emb = torch.cat([teacher_history, teacher_candidate], dim=1)
            teacher_news_emb_proj = self.transform_matrix[i](teacher_news_emb)
            # Per-sample news-embedding MSE, shape (batch_size,).
            NE_MSEs.append(
                hid_mse_loss(student_news_emb, teacher_news_emb_proj, reduce=False).mean(dim=-1))
            teacher_user_vector = self.teachers[i](teacher_history, history_mask)
            teacher_user_vector_proj = self.transform_matrix[i](teacher_user_vector)
            UE_MSEs.append(hid_mse_loss(student_user_emb, teacher_user_vector_proj, reduce=False))
            # Teacher candidate scores via dot product with its user vector.
            score = torch.bmm(teacher_candidate,
                              teacher_user_vector.unsqueeze(dim=-1)).squeeze(dim=-1)
            teacher_loss = F.cross_entropy(score, label, reduction='none')
            teacher_scores.append(score)
            teacher_losses.append(teacher_loss)
        # Softmax over negated losses: lower loss -> larger teacher weight.
        teacher_losses = -torch.stack(teacher_losses, dim=-1)
        teacher_weights = F.softmax(teacher_losses, dim=-1)

        # Weighted ensemble of teacher scores drives the distillation loss.
        teacher_scores = torch.stack(teacher_scores, dim=-1)
        teacher_scores = torch.bmm(teacher_scores,
                                   teacher_weights.unsqueeze(dim=-1)).squeeze(dim=-1)
        distill_loss = kd_ce_loss(student_score, teacher_scores, self.args.temperature)

        # Embedding alignment losses, weighted by the same teacher weights.
        NE_MSEs = torch.stack(NE_MSEs, dim=-1)
        UE_MSEs = torch.stack(UE_MSEs, dim=-1)
        emb_loss = (NE_MSEs * teacher_weights).sum(dim=-1).mean() + (UE_MSEs * teacher_weights).sum(
            dim=-1).mean()

        total_loss = distill_loss + self.args.coef * target_loss + emb_loss
        return total_loss, distill_loss, emb_loss, target_loss, student_score
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/preprocess.py | import tensorflow as tf
from tqdm import tqdm
import numpy as np
from utils import MODEL_CLASSES
def update_dict(dict, key, value=None):
    """Insert `key` into `dict` if absent.

    When `value` is None the next sequential id (len(dict) + 1) is
    assigned, so the mapping doubles as an id vocabulary. Existing keys
    are never overwritten.
    """
    if key in dict:
        return
    dict[key] = len(dict) + 1 if value is None else value
def read_news_bert(news_path, args, mode='train'):
    """Read the MIND-style news TSV and tokenize titles.

    Each line must contain (tab-separated): doc_id, category, subcategory,
    title, plus four unused fields. Doc ids and (in 'train' mode)
    categories are assigned sequential ids starting at 1, leaving 0 free
    for padding.

    Returns (news, news_index, category_dict, subcategory_dict) in 'train'
    mode, or (news, news_index) in 'test' mode.

    Raises:
        ValueError: if `mode` is neither 'train' nor 'test'.
    """
    news = {}
    category_dict = {}
    subcategory_dict = {}
    news_index = {}
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name, do_lower_case=True)

    with tf.io.gfile.GFile(news_path, "r") as f:
        for line in tqdm(f):
            splited = line.strip('\n').split('\t')
            doc_id, category, subcategory, title, _, _, _, _ = splited
            update_dict(news_index, doc_id)

            title = title.lower()
            title = tokenizer(title, max_length=args.num_words_title,
                              pad_to_max_length=True, truncation=True)
            update_dict(news, doc_id, [title, category, subcategory])
            if mode == 'train':
                update_dict(category_dict, category)
                update_dict(subcategory_dict, subcategory)

    if mode == 'train':
        return news, news_index, category_dict, subcategory_dict
    elif mode == 'test':
        return news, news_index
    # FIX: the original used `assert False, 'Wrong mode!'`, which is
    # stripped under `python -O` and would silently return None.
    raise ValueError("Wrong mode: expected 'train' or 'test', got %r" % mode)
def get_doc_input_bert(news, news_index, category_dict, subcategory_dict, args):
    """Pack the parsed news dict into dense numpy arrays indexed by doc id.

    Row 0 of every array is left as zeros and serves as the padding entry.
    Returns (title token ids, title attention masks, category ids,
    subcategory ids).
    """
    news_num = len(news) + 1
    news_title = np.zeros((news_num, args.num_words_title), dtype='int32')
    news_title_attmask = np.zeros(
        (news_num, args.num_words_title), dtype='int32')
    news_category = np.zeros(news_num, dtype='int32')
    news_subcategory = np.zeros(news_num, dtype='int32')

    for doc_id, (title, category, subcategory) in tqdm(news.items()):
        row = news_index[doc_id]
        news_title[row] = title['input_ids']
        news_title_attmask[row] = title['attention_mask']
        # Unknown categories fall back to the 0 (padding) id.
        news_category[row] = category_dict.get(category, 0)
        news_subcategory[row] = subcategory_dict.get(subcategory, 0)

    return news_title, news_title_attmask, news_category, news_subcategory
| 2,316 | 33.58209 | 109 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/convert_state_dict.py | import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
        archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
        cache_dir, force_download, proxies, resume_download,
):
    """Resolve a checkpoint archive via transformers' download cache and load it.

    Wraps `cached_path` with a more descriptive error message, then loads
    the resolved file onto CPU with torch.load.
    """
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
                                            proxies=proxies, resume_download=resume_download)
    except EnvironmentError:
        # Distinguish "known model name but server unreachable" from
        # "model name not in the archive map at all".
        if pretrained_model_name_or_path in pretrained_model_archive_map:
            msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
                archive_file)
        else:
            msg = "Model name '{}' was not found in model name list ({}). " \
                "We assumed '{}' was a path or url to model weight files named one of {} but " \
                "couldn't find any such file at this path or url.".format(
                pretrained_model_name_or_path,
                ', '.join(pretrained_model_archive_map.keys()),
                archive_file,
                [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
        raise EnvironmentError(msg)

    if resolved_archive_file == archive_file:
        logger.info("loading weights file {}".format(archive_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))

    # Always load on CPU; callers move tensors to the target device later.
    return torch.load(resolved_archive_file, map_location='cpu')
def load_model(state_dict):
    """Convert a TNLRv3 checkpoint state dict to HF-BERT-style naming.

    * q_bias / v_bias vectors become query/value biases; a zero key bias
      is materialized (TNLRv3 has none).
    * The fused qkv_linear weight is split into query/key/value weights.
    * The encoder-level rel_pos_bias is moved up to the model level.
    All other entries are passed through unchanged.
    """
    converted = {}
    for name, tensor in state_dict.items():
        if name.endswith("attention.self.q_bias"):
            converted[name.replace(
                "attention.self.q_bias", "attention.self.query.bias")] = tensor.view(-1)
        elif name.endswith("attention.self.v_bias"):
            flat = tensor.view(-1)
            converted[name.replace(
                "attention.self.v_bias", "attention.self.value.bias")] = flat
            # No key bias in the checkpoint; create an all-zero one.
            converted[name.replace(
                "attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(flat)
        elif name.endswith("attention.self.qkv_linear.weight"):
            rows = tensor.size(0)
            assert rows % 3 == 0
            q, k, v = torch.split(tensor, rows // 3, dim=0)
            fused = "attention.self.qkv_linear.weight"
            converted[name.replace(fused, "attention.self.query.weight")] = q
            converted[name.replace(fused, "attention.self.key.weight")] = k
            converted[name.replace(fused, "attention.self.value.weight")] = v
        elif name == "bert.encoder.rel_pos_bias.weight":
            converted["bert.rel_pos_bias.weight"] = tensor
        else:
            converted[name] = tensor
    return converted
# Maps model_type keyword -> checkpoint conversion function; consumed by
# TuringNLRv3PreTrainedModel.from_pretrained when loading raw checkpoints.
state_dict_convert = {
    'tnlrv3': load_model,
}
| 3,162 | 40.077922 | 109 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/tokenization_tnlrv3.py | # coding=utf-8
"""Tokenization classes for TuringNLRv3."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
}
class TuringNLRv3Tokenizer(BertTokenizer):
    r"""
    Constructs a TuringNLRv3Tokenizer.
    :class:`~transformers.TuringNLRv3Tokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
    Args:
        vocab_file: Path to a one-wordpiece-per-line vocabulary file
        do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
            minimum of this value (if specified) and the underlying model's sequence length.
        never_split: List of tokens which will never be split during tokenization. Only has an effect when
            do_wordpiece_only=False
    """
    # Class-level registries consumed by PreTrainedTokenizer.from_pretrained;
    # the archive maps are intentionally empty (local vocab files only).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
    """Minimal tokenizer that only splits on whitespace (no wordpiece)."""

    def tokenize(self, text):
        # Delegates to transformers' whitespace_tokenize (strip + split).
        return whitespace_tokenize(text)
| 1,646 | 31.94 | 145 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/s2s_loader.py | import numpy as np
from random import randint
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
    """Return a uniformly random token from `vocab_words`."""
    return vocab_words[randint(0, len(vocab_words) - 1)]
def batch_list_to_batch_tensors(batch):
    """Transpose a list of samples into per-field batch tensors.

    Each sample is a tuple of fields. Per field: None stays None, tensor
    fields are stacked, and anything else is wrapped in a long tensor.
    """
    def collate(field_values):
        first = field_values[0]
        if first is None:
            return None
        if isinstance(first, torch.Tensor):
            return torch.stack(field_values)
        return torch.tensor(field_values, dtype=torch.long)

    return [collate(values) for values in zip(*batch)]
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """ Pre-process Pipeline Class : callable """

    def __init__(self):
        super().__init__()
        # Masking / sampling configuration fields; subclasses assign the
        # ones they need (all default to disabled / None here).
        self.skipgram_prb = None
        self.skipgram_size = None
        self.pre_whole_word = None
        self.mask_whole_word = None
        self.word_subsample_prb = None
        self.sp_prob = None
        self.pieces_dir = None
        self.vocab_words = None
        self.pieces_threshold = 10
        self.call_count = 0
        self.offline_mode = False
        self.skipgram_size_geo_list = None
        self.span_same_mask = False

    def __call__(self, instance):
        # Subclasses implement the actual per-instance preprocessing.
        raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Prepares one source sequence for seq2seq decoding: pads the source,
    # builds segment ids, position ids, and the attention mask that lets
    # target positions attend causally while all positions see the source.

    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
                 mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
                 cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
        super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self.max_len = max_len
        # Lower-triangular template reused to build causal attention masks.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.task_idx = 3   # relax projection layer for different tasks
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.max_tgt_length = max_tgt_length
        self.pos_shift = pos_shift
        # Number of special tokens around the source: [CLS] only when
        # pos_shift, otherwise [CLS] ... [SEP].
        self.delta = 1 if pos_shift else 2
        self.cls_token = cls_token
        self.sep_token = sep_token
        self.pad_token = pad_token
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        self.cc = 0  # call counter, used only to limit debug logging

    def __call__(self, instance):
        # instance: (tokens_a, max_a_len) — the source tokens and the
        # per-batch maximum source length to pad to.
        tokens_a, max_a_len = instance

        padded_tokens_a = [self.cls_token] + tokens_a
        if not self.pos_shift:
            padded_tokens_a = padded_tokens_a + [self.sep_token]
        assert len(padded_tokens_a) <= max_a_len + self.delta
        if max_a_len + self.delta > len(padded_tokens_a):
            padded_tokens_a += [self.pad_token] * \
                (max_a_len + self.delta - len(padded_tokens_a))
        assert len(padded_tokens_a) == max_a_len + self.delta

        max_len_in_batch = min(self.max_tgt_length +
                               max_a_len + self.delta, self.max_len)
        tokens = padded_tokens_a
        # Source positions get source_type_id, the (future) target slots
        # get target_type_id.
        segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
            + [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))

        mask_qkv = None

        # Position ids: real source positions count up, source padding is 0,
        # and target positions continue from the end of the real source.
        position_ids = []
        for i in range(len(tokens_a) + self.delta):
            position_ids.append(i)
        for i in range(len(tokens_a) + self.delta, max_a_len + self.delta):
            position_ids.append(0)
        for i in range(max_a_len + self.delta, max_len_in_batch):
            position_ids.append(
                i - (max_a_len + self.delta) + len(tokens_a) + self.delta)

        # Token Indexing
        input_ids = self.indexer(tokens)

        self.cc += 1
        if self.cc < 20:
            # Log the first few decoded inputs for debugging.
            logger.info(u"Input src = %s" % " ".join(
                (self.vocab_words[tk_id]) for tk_id in input_ids))

        # Zero Padding
        input_mask = torch.zeros(
            max_len_in_batch, max_len_in_batch, dtype=torch.long)
        if self.mode == "s2s":
            # Every position may attend to the whole (unpadded) source.
            input_mask[:, :len(tokens_a) + self.delta].fill_(1)
        else:
            # l2r: causal attention within the source as well.
            st, end = 0, len(tokens_a) + self.delta
            input_mask[st:end, st:end].copy_(
                self._tril_matrix[:end, :end])
            input_mask[end:, :len(tokens_a) + self.delta].fill_(1)
        # Target positions attend causally among themselves.
        second_st, second_end = len(padded_tokens_a), max_len_in_batch
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])

        return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
| 5,318 | 32.878981 | 90 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/modeling.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, \
BertOutput, BertPredictionHeadTransform, BertPooler
from transformers.file_utils import WEIGHTS_NAME
from tnlrv3.config import TuringNLRv3ForSeq2SeqConfig
from tnlrv3.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP = {
}
class TuringNLRv3PreTrainedModel(BertPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = TuringNLRv3ForSeq2SeqConfig
    # model_type keyword -> pretrained-archive map; used by from_pretrained
    # to decide which checkpoint-conversion function to apply.
    supported_convert_pretrained_model_archive_map = {
        "tnlrv3": TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }
    base_model_prefix = "TuringNLRv3_for_seq2seq"
    pretrained_model_archive_map = {
        **TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, reuse_position_embedding=None,
        replace_prefix=None, *model_args, **kwargs,
    ):
        # Loads a checkpoint (HF cache, local file, or local directory),
        # converts TNLRv3-format state dicts to HF naming, and resizes the
        # position-embedding table when config.max_position_embeddings
        # differs from the checkpoint's.
        model_type = kwargs.pop('model_type', 'tnlrv3')
        if model_type is not None and "state_dict" not in kwargs:
            if model_type in cls.supported_convert_pretrained_model_archive_map:
                pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[model_type]
                if pretrained_model_name_or_path in pretrained_model_archive_map:
                    # Known model name: download (or hit cache) and convert.
                    state_dict = get_checkpoint_from_transformer_cache(
                        archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
                        pretrained_model_name_or_path=pretrained_model_name_or_path,
                        pretrained_model_archive_map=pretrained_model_archive_map,
                        cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
                        proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
                    )
                    state_dict = state_dict_convert[model_type](state_dict)
                    kwargs["state_dict"] = state_dict
                    logger.info("Load HF ckpts")
                elif os.path.isfile(pretrained_model_name_or_path):
                    # Local checkpoint file.
                    state_dict = torch.load(pretrained_model_name_or_path, map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
                    logger.info("Load local ckpts")
                elif os.path.isdir(pretrained_model_name_or_path):
                    # Local checkpoint directory containing WEIGHTS_NAME.
                    state_dict = torch.load(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
                    logger.info("Load local ckpts")
                else:
                    raise RuntimeError("Not fined the pre-trained checkpoint !")

        if kwargs["state_dict"] is None:
            logger.info("TNLRv3 does't support the model !")
            raise NotImplementedError()

        config = kwargs["config"]
        state_dict = kwargs["state_dict"]
        # initialize new position embeddings (From Microsoft/UniLM)
        _k = 'bert.embeddings.position_embeddings.weight'
        if _k in state_dict:
            if config.max_position_embeddings > state_dict[_k].shape[0]:
                # Grow: copy the old table into the start of the new one;
                # with reuse_position_embedding, tile it to fill the rest.
                logger.info("Resize > position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
                max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
                shift = 0
                while shift < max_range:
                    delta = min(old_vocab_size, max_range - shift)
                    new_postion_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
                    logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
                    shift += delta
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding
            elif config.max_position_embeddings < state_dict[_k].shape[0]:
                # Shrink: keep only the first max_position_embeddings rows.
                logger.info("Resize < position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
                new_postion_embedding.data.copy_(state_dict[_k][:config.max_position_embeddings, :])
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding

        if replace_prefix is not None:
            # Strip replace_prefix from matching keys before HF loading.
            new_state_dict = {}
            for key in state_dict:
                if key.startswith(replace_prefix):
                    new_state_dict[key[len(replace_prefix):]] = state_dict[key]
                else:
                    new_state_dict[key] = state_dict[key]
            kwargs["state_dict"] = new_state_dict
            del state_dict

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        # Optionally freeze the word-embedding table.
        if getattr(config, "fix_word_embedding", None):
            self.word_embeddings.weight.requires_grad = False
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # Token-type embeddings are optional (type_vocab_size may be 0).
        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        else:
            self.token_type_embeddings = None

        # self.LayerNorm keeps its TensorFlow-style name so any TensorFlow
        # checkpoint file can be loaded directly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_length = input_shape[1]

        if position_ids is None:
            # Default to consecutive positions 0..seq_length-1 per row.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.position_embeddings(position_ids)
        if self.token_type_embeddings:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)
        embeddings = self.dropout(self.LayerNorm(embeddings))
        return embeddings, position_ids
class BertSelfAttention(nn.Module):
    # Multi-head self-attention with optional additive relative-position
    # bias and an incremental "split_lengths" mode in which the sequence is
    # processed in segments, each segment attending only to itself and to
    # earlier segments (prefix attention).
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def multi_head_attention(self, query, key, value, attention_mask, rel_pos):
        query_layer = self.transpose_for_scores(query)
        key_layer = self.transpose_for_scores(key)
        value_layer = self.transpose_for_scores(value)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        if rel_pos is not None:
            # Additive relative-position bias, broadcast over the scores.
            attention_scores = attention_scores + rel_pos
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)

        # Merge heads back: (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return (context_layer, attention_probs) if self.output_attentions else (context_layer,)

    def forward(self, hidden_states, attention_mask=None,
                encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        mixed_query_layer = self.query(hidden_states)
        if split_lengths:
            # Segment mode cannot also return attention probabilities.
            assert not self.output_attentions

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)

        if split_lengths:
            query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
            key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
            value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)

            key = None
            value = None
            outputs = []
            sum_length = 0
            # Each segment's queries attend over all keys/values up to and
            # including that segment; mask/bias are sliced to match.
            for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
                key = _key if key is None else torch.cat((key, _key), dim=1)
                value = _value if value is None else torch.cat((value, _value), dim=1)
                sum_length += part_length
                outputs.append(self.multi_head_attention(
                    query, key, value, attention_mask[:, :, sum_length - part_length: sum_length, :sum_length],
                    rel_pos=None if rel_pos is None else rel_pos[:, :, sum_length - part_length: sum_length, :sum_length],
                )[0])
            outputs = (torch.cat(outputs, dim=1), )
        else:
            outputs = self.multi_head_attention(
                mixed_query_layer, mixed_key_layer, mixed_value_layer,
                attention_mask, rel_pos=rel_pos)
        return outputs
class BertAttention(nn.Module):
    """Attention sublayer: BertSelfAttention followed by the residual
    output projection (BertSelfOutput)."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        self_outputs = self.self(
            hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            split_lengths=split_lengths,
            rel_pos=rel_pos)
        attention_output = self.output(self_outputs[0], hidden_states)
        # Forward any attention probabilities the submodule produced.
        return (attention_output,) + self_outputs[1:]
class BertLayer(nn.Module):
    """One transformer block: attention sublayer + feed-forward sublayer."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        attn_outputs = self.attention(
            hidden_states, attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        attn_output = attn_outputs[0]
        # Position-wise feed-forward with residual connection.
        layer_output = self.output(self.intermediate(attn_output), attn_output)
        return (layer_output,) + attn_outputs[1:]
class BertEncoder(nn.Module):
    """Stack of BertLayer blocks, optionally collecting per-layer hidden
    states and attention maps."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        all_hidden_states = ()
        all_attentions = ()
        for layer_module in self.layer:
            if self.output_hidden_states:
                # Record the input to this layer (embeddings for layer 0).
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask,
                split_lengths=split_lengths, rel_pos=rel_pos)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if self.output_hidden_states:
            # Include the final layer's output as well.
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """
    Map integer relative positions to bucket ids (T5-style).

    Half of the per-direction buckets cover exact small offsets; the rest
    cover logarithmically larger offsets up to `max_distance`. With
    `bidirectional=True`, positive and negative offsets use disjoint
    bucket ranges.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        num_buckets //= 2
        # Positive offsets occupy the upper half of the bucket range.
        bucket += (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # distance is now in [0, inf); small distances map one-to-one.
    max_exact = num_buckets // 2
    is_small = distance < max_exact

    # Larger distances map logarithmically into the remaining buckets,
    # clamped to the final bucket.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact) / math.log(max_distance /
                                                           max_exact) * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))

    bucket += torch.where(is_small, distance, log_bucket)
    return bucket
class TuringNLRv3Model(TuringNLRv3PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """

    def __init__(self, config):
        super(TuringNLRv3Model, self).__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Seq2seq fine-tuning configs do not use the [CLS] pooler; it is only
        # built for classification-style configs.
        if not isinstance(config, TuringNLRv3ForSeq2SeqConfig):
            self.pooler = BertPooler(config)
        else:
            self.pooler = None
        # Learned per-attention-head projection of bucketed relative positions
        # (disabled entirely when rel_pos_bins == 0).
        if self.config.rel_pos_bins > 0:
            self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, inputs_embeds=None, split_lengths=None):
        """Embed the inputs, build the additive attention mask and optional
        relative-position bias, and run the encoder.

        Exactly one of ``input_ids`` / ``inputs_embeds`` must be given.
        ``split_lengths`` is forwarded to the encoder (segment lengths for
        seq2seq attention; semantics defined by BertEncoder/BertLayer).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output, position_ids = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)

        if self.config.rel_pos_bins > 0:
            # Pairwise signed offsets between positions, bucketed and projected
            # to one bias per attention head: (batch, heads, seq, seq).
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        else:
            rel_pos = None
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        sequence_output = encoder_outputs[0]

        outputs = (sequence_output, ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        if self.pooler is None:
            return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
        else:
            pooled_output = self.pooler(sequence_output)
            return (sequence_output, pooled_output) + encoder_outputs[1:]
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        # Smoothing mass is spread over every class except the gold label and
        # the ignored (padding) index, hence the "- 2" in the denominator.
        fill_value = label_smoothing / (tgt_vocab_size - 2)
        smoothed = torch.full((tgt_vocab_size,), fill_value)
        smoothed[self.ignore_index] = 0
        self.register_buffer('one_hot', smoothed.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_output = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)

        # Build the smoothed target distribution for every position, then put
        # the confidence mass on the gold label and zero out padding rows.
        model_prob = self.one_hot.float().repeat(flat_target.size(0), 1)
        model_prob.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((flat_target == self.ignore_index).unsqueeze(1), 0)

        kl = F.kl_div(flat_output, model_prob, reduction='none')
        return kl.view(batch_size, num_pos, -1).sum(2)
class BertLMPredictionHead(nn.Module):
    """Masked-LM output head: transform the hidden states, then project onto
    the vocabulary with weights tied to the input embeddings."""

    def __init__(self, config, decoder_weight):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The projection weight is shared with the word-embedding matrix; only
        # the per-token output bias is a fresh parameter.
        self.decoder_weight = decoder_weight
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return F.linear(transformed, weight=self.decoder_weight, bias=self.bias)
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper that exposes only the masked-LM prediction head."""

    def __init__(self, config, decoder_weight):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_weight)

    def forward(self, sequence_output):
        # Delegate straight to the LM head; no extra processing.
        return self.predictions(sequence_output)
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
    """Build a padding mask and position ids for a batch of ragged sequences.

    Args:
        num_tokens: (batch,) tensor of true sequence lengths.
        max_len: padded length of the batch.
        offset: optional (batch,) tensor added to every position id (used to
            continue target positions after the source segment).

    Returns:
        (mask, position_ids), both (batch, max_len); padded slots get mask 0
        and position id 0.
    """
    positions = torch.arange(
        0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
    # 1 for real tokens, 0 for padding. Note the mask is derived from the
    # *un-shifted* positions, before any offset is applied.
    mask = (positions < num_tokens.view(-1, 1)).type_as(num_tokens)
    if offset is not None:
        positions = positions + offset.view(-1, 1)
    return mask, positions * mask
class TuringNLRv3ForSequenceToSequence(TuringNLRv3PreTrainedModel):
    """Base seq2seq model: TuringNLRv3 encoder + tied-embedding LM head plus
    the (optionally label-smoothed) masked-LM criterion."""

    MODEL_NAME = 'basic class'

    def __init__(self, config):
        super(TuringNLRv3ForSequenceToSequence, self).__init__(config)
        self.bert = TuringNLRv3Model(config)
        # The LM head shares its projection with the word-embedding matrix.
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.init_weights()
        self.log_softmax = nn.LogSoftmax()
        self.source_type_id = config.source_type_id
        self.target_type_id = config.target_type_id
        # Exactly one of the two criteria is active, the other stays None.
        use_smoothing = config.label_smoothing > 0
        self.crit_mask_lm_smoothed = LabelSmoothingLoss(
            config.label_smoothing, config.vocab_size,
            ignore_index=0, reduction='none') if use_smoothing else None
        self.crit_mask_lm = None if use_smoothing \
            else nn.CrossEntropyLoss(reduction='none')
class TuringNLRv3ForSequenceToSequenceWithPseudoMask(TuringNLRv3ForSequenceToSequence):
    # Seq2seq fine-tuning variant that appends a fully-masked "pseudo" copy of
    # the target after the real target, so every target position can be
    # predicted in a single forward pass.
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceWithPseudoMask"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        # Builds the (batch, from, to) attention mask over the concatenated
        # [source | target | pseudo-target] sequence. Source positions get
        # weight 0, real target positions their (positive) span id, pseudo
        # positions the negated span id; visibility is then decided by sign
        # and relative span order.
        weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)

        # A "to" position is attendable only if it is non-pseudo and unpadded.
        true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
        # Real (source/target) queries see all earlier-or-equal real spans.
        true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
        # Pseudo queries see strictly earlier real spans ...
        pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
        # ... plus their own pseudo position (equal negated span id).
        pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))

        return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)

    def forward(
            self, source_ids, target_ids, label_ids, pseudo_ids,
            num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None):
        """Compute the (normalized) masked-LM loss over the pseudo-target
        positions for a batch of source/target pairs."""
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        pseudo_len = pseudo_ids.size(1)
        assert target_len == pseudo_len
        assert source_len > 0 and target_len > 0
        split_lengths = (source_len, target_len, pseudo_len)

        # Concatenate [source | target | pseudo-target] into one sequence.
        input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)

        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id,
             torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)

        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        # Target positions normally continue after the source segment
        # (offset by the per-example source length).
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens)

        # Pseudo positions reuse the real target's position ids.
        position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
        if target_span_ids is None:
            target_span_ids = target_position_ids
        attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)

        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)

        sequence_output = outputs[0]
        # Only the pseudo-target slice is scored.
        pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]

        def loss_mask_and_normalize(loss, mask):
            # Zero padded positions and normalize by the number of real tokens.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()

        prediction_scores_masked = self.cls(pseudo_sequence_output)

        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), label_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), target_mask)

        return pseudo_lm_loss
class TuringNLRv3ForSequenceToSequenceUniLMV1(TuringNLRv3ForSequenceToSequence):
    # UniLM-v1 style fine-tuning: a causal mask over [source | target] with the
    # loss computed only at explicitly masked target positions.
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceUniLMV1"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        # Source positions get weight 0, target positions their span id, so a
        # query may attend to any unpadded token with weight <= its own
        # (i.e. full source visibility, left-to-right over the target).
        weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)

        true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1)
        return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask)

    def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens):
        """Compute the masked-LM loss at the `masked_pos` positions of the
        target, normalized by `masked_weight`."""
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        split_lengths = (source_len, target_len)

        input_ids = torch.cat((source_ids, target_ids), dim=1)

        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id), dim=1)

        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        # Target positions continue after the source segment.
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=num_source_tokens)

        position_ids = torch.cat((source_position_ids, target_position_ids), dim=1)
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_position_ids)

        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)

        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked target positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))

        sequence_output = outputs[0]

        target_sequence_output = sequence_output[:, source_len:, ]
        masked_sequence_output = gather_seq_out_by_pos(target_sequence_output, masked_pos)

        def loss_mask_and_normalize(loss, mask):
            # Zero ignored positions and normalize by the number of scored ones.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()

        prediction_scores_masked = self.cls(masked_sequence_output)

        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weight)

        return pseudo_lm_loss
class TuringNLRv3ForSequenceClassification(TuringNLRv3PreTrainedModel):
    # Sequence-level classifier / regressor on top of the pooled [CLS] output.
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = TuringNLRv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import BertTokenizer, BertForSequenceClassification
        import torch
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            # head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1] is the pooled [CLS] representation (pooler is enabled for
        # classification configs in TuringNLRv3Model).
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        # NOTE(review): unlike stock HF BERT (which appends outputs[2:]), this
        # keeps the *entire* encoder output tuple after the logits — see the
        # return-layout comment below; confirm downstream indexing expects it.
        outputs = (logits,) + outputs[:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, last_hidden_state, pooled_output, (hidden_states), (attentions)
| 37,949 | 46.319202 | 146 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/utils.py | from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import array
import collections
import torch.utils.data
from transformers.file_utils import WEIGHTS_NAME
try:
import lmdb
except:
pass
OPTIM_NAME = "optimizer.bin"
logger = logging.getLogger(__name__)
class TrainingExample(object):
    """A single training pair: tokenized source/target id lists plus the
    example's index within the dataset."""

    def __init__(self, source_ids, target_ids, example_id):
        # List of token ids for the source (input) sequence.
        self.source_ids = source_ids
        # List of token ids for the target (output) sequence; may be empty in eval mode.
        self.target_ids = target_ids
        # Position of this example in the feature list (None when loaded from LMDB).
        self.example_id = example_id
class Seq2seqDatasetForTuringNLRv3(torch.utils.data.Dataset):
    # Wraps tokenized (source, target) features and performs on-the-fly
    # truncation, padding and target masking for seq2seq fine-tuning.
    # `finetuning_method` selects the masking scheme:
    #   'v0' - causal: every target position predicts the next token,
    #   'v1' - mask a random subset of target positions,
    #   'v2' - pseudo-mask: emit a fully masked copy of the whole target.
    def __init__(
            self, features, max_source_len, max_target_len,
            vocab_size, cls_id, sep_id, pad_id, mask_id,
            random_prob, keep_prob, offset, num_training_instances,
            finetuning_method='v1', target_mask_prob=-1.0, num_max_mask_token=0,
            source_mask_prob=-1.0,
    ):
        self.features = features
        self.max_source_len = max_source_len
        self.max_target_len = max_target_len
        # Starting offset into `features`, used to resume mid-epoch.
        self.offset = offset
        if offset > 0:
            logger.info(
                "  ****  Set offset %d in Seq2seqDatasetForBert ****  ", offset)
        self.cls_id = cls_id
        self.sep_id = sep_id
        self.pad_id = pad_id
        # BERT-style corruption: keep the token with `keep_prob`, replace with a
        # random token with `random_prob`, otherwise use [MASK].
        self.random_prob = random_prob
        self.keep_prob = keep_prob
        self.mask_id = mask_id
        self.vocab_size = vocab_size
        # Virtual dataset length; indices wrap around the real feature list.
        self.num_training_instances = num_training_instances
        self.target_mask_prob = target_mask_prob
        if finetuning_method == 'v0':
            # v0 scores every target position, so the mask buffer must cover
            # the whole target.
            num_max_mask_token = self.max_target_len
            logger.info("Mask way v0: set num_max_mask_token = %d" %
                        num_max_mask_token)
        self.num_max_mask_token = num_max_mask_token
        self.finetuning_method = finetuning_method
        assert finetuning_method in ('v0', 'v1', 'v2')
        self.source_mask_prob = source_mask_prob

    def __len__(self):
        return self.num_training_instances

    def __trunk(self, ids, max_len, append_sep=True):
        # Truncate to max_len, optionally reserving the final slot for [SEP].
        if append_sep:
            max_len -= 1
        if len(ids) > max_len:
            ids = ids[:max_len]
        if append_sep:
            ids = ids + [self.sep_id]
        return ids

    def __pad(self, ids, max_len):
        # Right-pad with pad_id up to max_len (ids must not exceed max_len).
        if len(ids) < max_len:
            return ids + [self.pad_id] * (max_len - len(ids))
        else:
            assert len(ids) == max_len
            return ids

    def get_masked_token(self, tk_id):
        # BERT-style corruption of a single token: keep / random / [MASK].
        p = random.random()
        if p < self.keep_prob:
            return tk_id
        elif p < self.keep_prob + self.random_prob:
            return random.randint(0, self.vocab_size - 1)
        else:
            return self.mask_id

    def __getitem__(self, _idx):
        # Wrap the (offset) index around the real feature list.
        idx = (self.offset + _idx) % len(self.features)
        # print("%d get %d" % (_idx, idx))
        feature = self.features[idx]
        # v0 drops the trailing [SEP] on the source and prepends it to the
        # target instead (the decoder's start token).
        source_ids = self.__trunk([self.cls_id] + feature.source_ids,
                                  self.max_source_len, append_sep=self.finetuning_method != 'v0')
        target_ids = feature.target_ids
        if self.finetuning_method == 'v0':
            target_ids = [self.sep_id] + target_ids
        target_ids = self.__trunk(
            target_ids, self.max_target_len, append_sep=self.finetuning_method != 'v0')

        num_source_tokens = len(source_ids)
        num_target_tokens = len(target_ids)

        # Optional corruption of the source side (special tokens excluded).
        if self.source_mask_prob > 0:
            for i in range(num_source_tokens):
                tk_id = source_ids[i]
                if tk_id != self.cls_id and tk_id != self.sep_id:
                    r = random.random()
                    if r < self.source_mask_prob:
                        source_ids[i] = self.get_masked_token(tk_id)

        source_ids = self.__pad(source_ids, self.max_source_len)
        target_ids = self.__pad(target_ids, self.max_target_len)

        if self.finetuning_method == 'v0':
            # Causal: position pos predicts token pos+1 (or [SEP] at the end);
            # target tokens past position 0 are randomly corrupted.
            masked_pos = []
            masked_ids = []
            masked_weights = []
            for pos in range(num_target_tokens):
                if pos + 1 != num_target_tokens:
                    masked_ids.append(target_ids[pos + 1])
                else:
                    masked_ids.append(self.sep_id)
                masked_pos.append(pos)
                masked_weights.append(1)

                r = random.random()
                if r < self.target_mask_prob and pos > 0:
                    target_ids[pos] = self.get_masked_token(target_ids[pos])

            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v1':
            # Mask a random subset of target positions (at least one).
            masked_pos = list(range(num_target_tokens))
            random.shuffle(masked_pos)

            num_masked_token = \
                min(self.num_max_mask_token, int(
                    self.target_mask_prob * num_target_tokens))
            if num_masked_token <= 0:
                num_masked_token = 1

            masked_pos = masked_pos[:num_masked_token]

            masked_ids = []
            masked_weights = []
            for pos in masked_pos:
                masked_ids.append(target_ids[pos])
                target_ids[pos] = self.get_masked_token(target_ids[pos])
                masked_weights.append(1)

            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v2':
            # Pseudo-mask: emit a corrupted copy of the full target plus the
            # original labels; the real target may also be partially corrupted.
            pseudo_ids = []
            label_ids = []
            for pos in range(num_target_tokens):
                tk_id = target_ids[pos]
                masked_tk_id = self.get_masked_token(tk_id)
                pseudo_ids.append(masked_tk_id)
                label_ids.append(tk_id)

                r = random.random()
                if r < self.target_mask_prob:
                    target_ids[pos] = masked_tk_id

            label_ids = self.__pad(label_ids, self.max_target_len)
            pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)

            return source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens
def batch_list_to_batch_tensors(batch):
    """Collate a list of examples (tuples of fields) into per-field tensors.

    Tensor fields are stacked along a new batch dimension; every other field
    (lists of token ids, lengths, ...) is wrapped in a long tensor.
    """
    def _collate(field):
        if isinstance(field[0], torch.Tensor):
            return torch.stack(field)
        return torch.tensor(field, dtype=torch.long)

    return [_collate(field) for field in zip(*batch)]
def get_max_epoch_model(output_dir):
    """Return the highest checkpoint number under output_dir/ckpt-<N>/ that has
    BOTH a model weights file and an optimizer file, or None if no complete
    pair exists."""
    def _ckpt_numbers(file_name):
        paths = glob.glob(os.path.join(output_dir, "ckpt-*/%s" % file_name))
        return set(int(os.path.dirname(p).split('-')[-1]) for p in paths)

    model_ckpts = _ckpt_numbers(WEIGHTS_NAME)
    optim_ckpts = _ckpt_numbers(OPTIM_NAME)
    if not model_ckpts or not optim_ckpts:
        return None
    # Only checkpoints where both files exist count as recoverable.
    complete = model_ckpts & optim_ckpts
    return max(complete) if complete else None
def get_checkpoint_state_dict(output_dir, ckpt):
    """Load model + optimizer state for checkpoint `ckpt`.

    Reads output_dir/ckpt-<N>/{weights, optimizer} onto CPU and returns the
    optimizer checkpoint dict with the model state attached under 'model'.
    """
    ckpt_dir = os.path.join(output_dir, "ckpt-%d" % ckpt)
    model_recover_checkpoint = os.path.join(ckpt_dir, WEIGHTS_NAME)
    logger.info(" ** Recover model checkpoint in %s ** ",
                model_recover_checkpoint)
    model_state_dict = torch.load(model_recover_checkpoint, map_location='cpu')
    checkpoint_state_dict = torch.load(
        os.path.join(ckpt_dir, OPTIM_NAME), map_location='cpu')
    checkpoint_state_dict['model'] = model_state_dict
    return checkpoint_state_dict
def report_length(length_counter, total_count):
    """Log a histogram of sequence lengths in buckets of 16 tokens.

    Args:
        length_counter: mapping {length: count}; works with defaultdict,
            Counter, or a plain dict (lookups use .get()).
        total_count: total number of examples, used for the cumulative
            percentage column.
    """
    # Same logger object as the module-level one (getLogger is keyed by name);
    # bound locally so the function is self-contained.
    log = logging.getLogger(__name__)
    max_len = max(length_counter.keys())
    a = 0
    tc = 0
    # `<=` so the bucket containing max_len itself is reported even when
    # max_len is an exact multiple of 16 (a strict `<` silently dropped it).
    while a <= max_len:
        cc = 0
        for i in range(16):
            # .get() keeps plain dicts working (no defaultdict required).
            cc += length_counter.get(a + i, 0)
        tc += cc
        if cc > 0:
            log.info("%d ~ %d = %d, %.2f%%" %
                     (a, a + 16, cc, (tc * 100.0) / total_count))
        a += 16
def serialize_str(x):
    """Encode a value's text form as ASCII bytes (LMDB stores raw bytes)."""
    return str(x).encode('ascii')
def serialize_array(x, dtype):
    """Pack a list of numbers into raw bytes using array typecode `dtype`."""
    return array.array(dtype, x).tobytes()
def write_to_lmdb(db, key, value):
    """Put key/value into an LMDB environment, growing the map on demand.

    LMDB environments have a fixed map_size; when a put overflows it, the
    transaction is aborted, the map size is doubled, and the write is retried
    until it fits.
    """
    success = False
    while not success:
        txn = db.begin(write=True)
        try:
            txn.put(key, value)
            txn.commit()
            success = True
        except lmdb.MapFullError:
            # The write did not fit; roll back before resizing.
            txn.abort()
            # double the map_size
            curr_limit = db.info()['map_size']
            new_limit = curr_limit*2
            print('>>> Doubling LMDB map size to %sMB ...' %
                  (new_limit >> 20,))
            db.set_mapsize(new_limit)  # double it
def deserialize_str(x):
    """Inverse of serialize_str: decode ASCII bytes back into text."""
    return str(x, 'ascii')
class DocDB(object):
    """Read-only view over an LMDB database of pre-tokenized training examples.

    The database stores, per example index i, the keys ``src_ids_<i>`` and
    ``tgt_ids_<i>`` as packed ``array`` bytes, plus the metadata keys
    ``__start__``, ``__size__`` and ``__dtype__`` written by
    load_and_cache_examples.
    """

    def __init__(self, db_path):
        self.db_path = db_path
        # Read-only, no locking/readahead: tuned for many concurrent readers.
        self.env = lmdb.open(db_path, readonly=True,
                             lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.start_key_index = int(deserialize_str(txn.get(b'__start__')))
            self.size = int(deserialize_str(txn.get(b'__size__')))
            # array typecode used when the ids were serialized (e.g. 'h').
            self.dtype = deserialize_str(txn.get(b'__dtype__'))

    def _deserialize_array(self, x):
        # Unpack raw bytes back into a plain Python list of ints.
        data = array.array(self.dtype)
        data.frombytes(x)
        return data.tolist()

    def __getitem__(self, doc_id):
        with self.env.begin(write=False) as txn:
            # example = {
            #     "source_ids": self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)),
            #     "target_ids": self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)),
            # }
            example = TrainingExample(
                source_ids=self._deserialize_array(
                    txn.get(b"src_ids_%d" % doc_id)),
                target_ids=self._deserialize_array(
                    txn.get(b"tgt_ids_%d" % doc_id)),
                example_id=None,
            )
        return example

    def __len__(self):
        return self.size
def load_and_cache_examples(
        example_file, tokenizer, local_rank, cached_features_file, shuffle=True,
        lmdb_cache=None, lmdb_dtype='h', eval_mode=False):
    """Load tokenized (source, target) features, preferring a cache.

    Resolution order: torch-saved cache file -> LMDB cache dir -> tokenize
    `example_file` (JSON-lines with "src"/"tgt" fields) from scratch, then
    optionally persist to the requested cache. In distributed training only
    rank 0 builds the cache; other ranks wait at the barrier and read it.
    Returns either a list of TrainingExample or a DocDB path/handle source.
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    if cached_features_file is not None and os.path.isfile(cached_features_file):
        # Fast path: a torch-saved list of features already exists.
        logger.info("Loading features from cached file %s",
                    cached_features_file)
        features = torch.load(cached_features_file)
    elif cached_features_file is not None and os.path.isdir(cached_features_file) \
            and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')):
        # An LMDB cache directory (identified by its lock file).
        logger.info("Loading features from cached LMDB %s",
                    cached_features_file)
        features = DocDB(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", example_file)

        examples = []
        with open(example_file, mode="r", encoding="utf-8") as reader:
            for line in reader:
                examples.append(json.loads(line))
        features = []

        # Length histograms (source / target) for the log report below.
        slc = collections.defaultdict(int)
        tlc = collections.defaultdict(int)
        for example in tqdm.tqdm(examples):
            # "src"/"tgt" may already be token lists; otherwise tokenize here.
            if isinstance(example["src"], list):
                source_tokens = example["src"]
                target_tokens = [] if eval_mode else example["tgt"]
            else:
                source_tokens = tokenizer.tokenize(example["src"])
                target_tokens = [] if eval_mode else tokenizer.tokenize(
                    example["tgt"])
            source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
            target_ids = tokenizer.convert_tokens_to_ids(target_tokens)

            slc[len(source_ids)] += 1
            tlc[len(target_ids)] += 1

            features.append(
                TrainingExample(
                    source_ids=source_ids,
                    target_ids=target_ids,
                    example_id=len(features),
                )
            )

        if shuffle:
            random.shuffle(features)
            logger.info("Shuffle the features !")

        logger.info("Source length:")
        report_length(slc, total_count=len(examples))
        logger.info("Target length:")
        report_length(tlc, total_count=len(examples))

        if local_rank in [-1, 0] and cached_features_file is not None:
            if lmdb_cache:
                # Persist each example as packed array bytes plus the metadata
                # keys that DocDB expects (__start__/__size__/__dtype__).
                db = lmdb.open(cached_features_file,
                               readonly=False, map_async=True)

                for idx, feature in enumerate(features):
                    write_to_lmdb(
                        db, b"src_ids_%d" % idx,
                        serialize_array(feature.source_ids, dtype=lmdb_dtype))
                    write_to_lmdb(
                        db, b"tgt_ids_%d" % idx,
                        serialize_array(feature.target_ids, dtype=lmdb_dtype))

                write_to_lmdb(db, b"__start__", serialize_str(0))
                write_to_lmdb(db, b"__size__", serialize_str(len(features)))
                write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype))
                db.sync()
                db.close()

                logger.info("db_key_idx = %d" % len(features))

                # Free the in-memory list; callers reopen via DocDB.
                del features
                features = cached_features_file

                logger.info("Saving features into cached lmdb dir %s",
                            cached_features_file)
            else:
                logger.info("Saving features into cached file %s",
                            cached_features_file)
                torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
| 14,533 | 35.888325 | 119 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/modeling_decoding.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
                 reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        # Distribute the smoothing mass over all classes except the gold label
        # and the ignored index ("- 2").
        per_class = label_smoothing / (tgt_vocab_size - 2)
        template = torch.full((tgt_vocab_size,), per_class)
        template[self.ignore_index] = 0
        self.register_buffer('one_hot', template.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_output = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)

        # Smoothed target distribution with the confidence on the gold label;
        # rows whose target equals ignore_index are zeroed out entirely.
        model_prob = self.one_hot.repeat(flat_target.size(0), 1)
        model_prob.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((flat_target == self.ignore_index).unsqueeze(1), 0)

        kl = F.kl_div(flat_output, model_prob, reduction='none')
        return kl.view(batch_size, num_pos, -1).sum(2)
logger = logging.getLogger(__name__)
from transformers import WEIGHTS_NAME
def gelu(x):
    """Gaussian Error Linear Unit, exact erf formulation: x * Phi(x).
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # Phi(x) = standard-normal CDF, expressed via erf.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 relax_projection=0,
                 new_pos_ids=False,
                 initializer_range=0.02,
                 task_idx=None,
                 fp32_embedding=False,
                 ffn_type=0,
                 label_smoothing=None,
                 num_qkv=0,
                 seg_emb=False,
                 source_type_id=0,
                 target_type_id=1,
                 rel_pos_bins=0,
                 max_rel_pos=0, **kwargs):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        # A str argument is treated as a path to a JSON config file whose keys
        # become attributes verbatim; an int is the vocabulary size and all
        # remaining keyword defaults apply.
        if isinstance(vocab_size_or_config_json_file, str):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.relax_projection = relax_projection
            self.new_pos_ids = new_pos_ids
            self.initializer_range = initializer_range
            self.task_idx = task_idx
            self.fp32_embedding = fp32_embedding
            self.ffn_type = ffn_type
            self.label_smoothing = label_smoothing
            self.num_qkv = num_qkv
            self.seg_emb = seg_emb
            # Token-type ids assigned to source/target segments in seq2seq mode.
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
            # Relative-position bias settings (0 bins disables the bias).
            self.max_rel_pos = max_rel_pos
            self.rel_pos_bins = rel_pos_bins
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    class BertLayerNorm(nn.Module):
        """Pure-PyTorch fallback for apex's FusedLayerNorm.

        Implements TF-style layer normalization (epsilon added inside the
        square root), normalizing over the last dimension.
        """

        def __init__(self, hidden_size, eps=1e-5):
            super(BertLayerNorm, self).__init__()
            # Learnable affine parameters: scale starts at one, shift at zero.
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            mean = x.mean(-1, keepdim=True)
            var = (x - mean).pow(2).mean(-1, keepdim=True)
            normalized = (x - mean) / torch.sqrt(var + self.variance_epsilon)
            return self.weight * normalized + self.bias
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding (Transformer-XL style).

    For each position in ``pos_seq``, produces a ``demb``-dimensional vector
    whose first half is ``sin`` and second half ``cos`` of the position scaled
    by geometrically spaced inverse frequencies.
    """

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Inverse frequencies 1/10000^(2i/demb); a buffer so it moves with the
        # module (device/dtype) but is not trained.
        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, pos_seq, bsz=None):
        # Outer product: (len,) x (demb/2,) -> (len, demb/2) angle matrix.
        angles = torch.ger(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        # Insert a batch axis; expand it when a batch size is requested.
        emb = emb[:, None, :]
        if bsz is None:
            return emb
        return emb.expand(-1, bsz, -1)
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Sum of word, position and (optional) token-type embeddings, followed by
    LayerNorm and dropout. When ``config.new_pos_ids`` is set, four parallel
    position-embedding tables are kept and one is selected per batch element
    via ``task_idx``.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size)
        # type_vocab_size == 0 disables segment/token-type embeddings entirely.
        if config.type_vocab_size == 0:
            self.token_type_embeddings = None
        else:
            self.token_type_embeddings = nn.Embedding(
                config.type_vocab_size, config.hidden_size)
        if hasattr(config, 'fp32_embedding'):
            self.fp32_embedding = config.fp32_embedding
        else:
            self.fp32_embedding = False
        # new_pos_ids: keep 4 parallel position-embedding tables, packed along
        # the feature dimension and selected by task_idx in forward().
        if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
            self.num_pos_emb = 4
        else:
            self.num_pos_emb = 1
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
        seq_length = input_ids.size(1)
        # Default position ids: 0..seq_length-1 broadcast over the batch.
        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        if self.num_pos_emb > 1:
            # Unpack the parallel tables and pick, for each batch element, the
            # table indexed by that element's task_idx.
            num_batch = position_embeddings.size(0)
            num_pos = position_embeddings.size(1)
            position_embeddings = position_embeddings.view(
                num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        embeddings = words_embeddings + position_embeddings
        if self.token_type_embeddings is not None:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)
        # NOTE(review): converting to half when fp32_embedding is set looks
        # inverted at first glance; presumably the rest of the model runs in
        # fp16 and only the embedding *parameters* are kept fp32 — confirm.
        if self.fp32_embedding:
            embeddings = embeddings.half()
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with UniLM extensions.

    Extensions over vanilla BERT attention visible in this code:
      * optional multiple parallel QKV projections selected per token
        (``num_qkv`` / ``mask_qkv``),
      * optional segment-aware attention bias (``seg_emb`` / ``b_q_s``),
      * optional relative-position bias added to raw scores (``rel_pos``),
      * key/value caching hooks for incremental decoding
        (``history_states``, ``key_cache``/``value_cache``,
        ``key_history``/``value_history``).
    """

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # num_qkv > 1 keeps several parallel QKV projections; one is chosen
        # (globally or per token) in transpose_for_scores.
        if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
            self.num_qkv = config.num_qkv
        else:
            self.num_qkv = 1
        self.query = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.key = nn.Linear(config.hidden_size,
                             self.all_head_size * self.num_qkv)
        self.value = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Debug mode (env var UNI_DEBUG_FLAG): accumulate the head-averaged
        # attention probabilities of the first batch element into a buffer.
        self.uni_debug_flag = True if os.getenv(
            'UNI_DEBUG_FLAG', '') else False
        if self.uni_debug_flag:
            self.register_buffer('debug_attention_probs',
                                 torch.zeros((512, 512)))
        if hasattr(config, 'seg_emb') and config.seg_emb:
            # Segment-aware bias: a per-head query offset (b_q_s) and a
            # per-segment key embedding combined via einsum in forward().
            self.b_q_s = nn.Parameter(torch.zeros(
                1, self.num_attention_heads, 1, self.attention_head_size))
            self.seg_emb = nn.Embedding(
                config.type_vocab_size, self.all_head_size)
        else:
            self.b_q_s = None
            self.seg_emb = None

    def transpose_for_scores(self, x, mask_qkv=None):
        """Reshape a (batch, pos, hid) projection to (batch, head, pos, head_hid).

        With num_qkv > 1, additionally select one of the parallel projections:
        slot 0 when mask_qkv is None, a fixed slot when mask_qkv is an int, or
        a per-token slot when mask_qkv is a (batch, pos) index tensor.
        """
        if self.num_qkv > 1:
            # NOTE(review): the view target uses all_head_size here; for
            # num_attention_heads > 1 this does not match the projection width
            # (all_head_size * num_qkv) — presumably an untested path; confirm.
            sz = x.size()[:-1] + (self.num_qkv,
                                  self.num_attention_heads, self.all_head_size)
            # (batch, pos, num_qkv, head, head_hid)
            x = x.view(*sz)
            if mask_qkv is None:
                x = x[:, :, 0, :, :]
            elif isinstance(mask_qkv, int):
                x = x[:, :, mask_qkv, :, :]
            else:
                # mask_qkv: (batch, pos)
                if mask_qkv.size(1) > sz[1]:
                    mask_qkv = mask_qkv[:, :sz[1]]
                # -> x: (batch, pos, head, head_hid)
                x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
                    sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
        else:
            sz = x.size()[:-1] + (self.num_attention_heads,
                                  self.attention_head_size)
            # (batch, pos, head, head_hid)
            x = x.view(*sz)
        # (batch, head, pos, head_hid)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
                key_cache=None, value_cache=None, rel_pos=None,
                ):
        # NOTE(review): the key projection deliberately uses F.linear with
        # weight only (no bias) — see the linked apex issue; confirm this is
        # intentional for checkpoints trained with a key bias.
        if history_states is None:
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(hidden_states, self.key.weight)
            mixed_value_layer = self.value(hidden_states)
        else:
            # Incremental decoding: queries come only from the new positions,
            # but keys/values cover cached history plus the new positions.
            x_states = torch.cat((history_states, hidden_states), dim=1)
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(x_states, self.key.weight)
            mixed_value_layer = self.value(x_states)
        # Optional caches accumulated across calls (pre-head-split layout).
        if key_cache is not None and isinstance(key_cache, list):
            key_cache.append(mixed_key_layer)
            mixed_key_layer = torch.cat(key_cache, dim=1)
        if value_cache is not None and isinstance(value_cache, list):
            value_cache.append(mixed_value_layer)
            mixed_value_layer = torch.cat(value_cache, dim=1)
        query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
        key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
        value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
        # A tensor (not list) history is prepended along the position axis.
        if key_history is not None and not isinstance(key_history, list):
            key_layer = torch.cat((key_history, key_layer), dim=-2)
            value_layer = torch.cat((value_history, value_layer), dim=-2)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch, head, pos, pos)
        attention_scores = torch.matmul(
            query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if rel_pos is not None:
            attention_scores = attention_scores + rel_pos
        if self.seg_emb is not None:
            seg_rep = self.seg_emb(seg_ids)
            # (batch, pos, head, head_hid)
            seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
                1), self.num_attention_heads, self.attention_head_size)
            qs = torch.einsum('bnih,bjnh->bnij',
                              query_layer + self.b_q_s, seg_rep)
            attention_scores = attention_scores + qs
        # attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if self.uni_debug_flag:
            _pos = attention_probs.size(-1)
            self.debug_attention_probs[:_pos, :_pos].copy_(
                attention_probs[0].mean(0).view(_pos, _pos))
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, head, pos, head_hid) -> (batch, pos, hid).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        # List-valued histories are appended in place for the caller's reuse.
        if isinstance(key_history, list):
            key_history.append(key_layer)
        if isinstance(value_history, list):
            value_history.append(value_layer)
        return context_layer
class BertSelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after self-attention."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Complete attention sub-layer: self-attention plus its output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        context = self.self(
            input_tensor,
            attention_mask,
            history_states=history_states,
            mask_qkv=mask_qkv,
            seg_ids=seg_ids,
            key_history=key_history,
            value_history=value_history,
            rel_pos=rel_pos,
        )
        # Project the attention context and add the residual input.
        return self.output(context, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden size, with residual LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dense(hidden_states)
        contracted = self.dropout(contracted)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(contracted + input_tensor)
class TransformerFFN(nn.Module):
    """Gated feed-forward alternative to the standard BERT FFN.

    ffn_type 1 gates the learned projection with the raw input (wx0(x) * x);
    ffn_type 2 gates it with a second learned projection (wx0(x) * wx1(x)).
    Either way the result is projected, dropped out and layer-normalized with
    a residual connection.
    """

    def __init__(self, config):
        super(TransformerFFN, self).__init__()
        self.ffn_type = config.ffn_type
        assert self.ffn_type in (1, 2)
        if self.ffn_type in (1, 2):
            self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (2,):
            self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (1, 2):
            self.output = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, x):
        if self.ffn_type in (1, 2):
            projected = self.wx0(x)
            gate = x if self.ffn_type == 1 else self.wx1(x)
            out = self.output(projected * gate)
        out = self.dropout(out)
        # Residual connection + layer norm, as in the standard BERT output.
        return self.LayerNorm(out + x)
class BertLayer(nn.Module):
    """One transformer block: attention followed by a feed-forward sub-layer.

    Uses the gated TransformerFFN when config.ffn_type is truthy, otherwise
    the standard BertIntermediate/BertOutput pair.
    """

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.ffn_type = config.ffn_type
        if self.ffn_type:
            self.ffn = TransformerFFN(config)
        else:
            self.intermediate = BertIntermediate(config)
            self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        attn_out = self.attention(
            hidden_states, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        if self.ffn_type:
            return self.ffn(attn_out)
        return self.output(self.intermediate(attn_out), attn_out)
class BertEncoder(nn.Module):
    """Stack of BertLayer blocks.

    Supports incremental decoding via ``prev_embedding``/``prev_encoded_layers``
    (cached states of already-processed positions, which must be given
    together), and optional per-layer key/value history lists that are filled
    on the first pass and reused on subsequent passes.
    """

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # All layers share one architecture; deepcopy gives each its own weights.
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
                seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        # history embedding and encoded layers must be simultaneously given
        assert (prev_embedding is None) == (prev_encoded_layers is None)
        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            # Incremental mode: layer i attends over the cached states of the
            # earlier positions (prev_embedding for the first layer, then the
            # cached output of layer i-1).
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states,
                    mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            for i, layer_module in enumerate(self.layer):
                # While a history list is still being filled, pass the whole
                # list so the layer appends to it; once it holds an entry per
                # layer, pass this layer's cached entry for reuse.
                set_key = None
                if isinstance(key_history, list):
                    set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
                set_value = None
                if isinstance(value_history, list):
                    # Bug fix: the fill test previously checked len(key_history),
                    # which raises TypeError when only value_history is a list.
                    set_value = value_history if len(value_history) < len(self.layer) else value_history[i]
                hidden_states = layer_module(
                    hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
                    key_history=set_key, value_history=set_value, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Produce a fixed-size sentence representation from the first token.

    Applies a learned linear projection followed by tanh to the hidden state
    at position 0 (the [CLS] token).
    """

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # The model is "pooled" by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder.

    When config.relax_projection > 1, the output width is multiplied so the
    prediction head can keep one projection per task.
    """

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        hid_size = config.hidden_size
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            hid_size *= config.relax_projection
        self.dense = nn.Linear(config.hidden_size, hid_size)
        self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM output head whose decoder weight is tied to the input embeddings.

    Supports the multi-task "relax_projection" scheme (a per-task slice of the
    transformed hidden states) and an fp16 conversion path driven by
    ``config.fp32_embedding``.
    """

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(
            bert_model_embedding_weights.size(0)))
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            self.relax_projection = config.relax_projection
        else:
            self.relax_projection = 0
        self.fp32_embedding = config.fp32_embedding
        # NOTE(review): despite the name, convert_to_type casts to half when
        # fp32_embedding is set — presumably to match a fp16 model body whose
        # embedding parameters are kept in fp32; confirm.
        def convert_to_type(tensor):
            if self.fp32_embedding:
                return tensor.half()
            else:
                return tensor
        self.type_converter = convert_to_type
        self.converted = False

    def forward(self, hidden_states, task_idx=None):
        # Lazily convert the transform sub-module to half precision once,
        # on the first forward call.
        if not self.converted:
            self.converted = True
            if self.fp32_embedding:
                self.transform.half()
        hidden_states = self.transform(self.type_converter(hidden_states))
        if self.relax_projection > 1:
            num_batch = hidden_states.size(0)
            num_pos = hidden_states.size(1)
            # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
            hidden_states = hidden_states.view(
                num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        if self.fp32_embedding:
            # Manual linear with explicitly converted weight/bias so the tied
            # (fp32) embedding weight matches the fp16 activations.
            hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
                self.decoder.weight), self.type_converter(self.bias))
        else:
            hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence classifier)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        # Per-token vocabulary logits.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head only: a binary classifier on the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Pre-training heads: masked-LM decoder plus a sentence-level classifier."""

    def __init__(self, config, bert_model_embedding_weights, num_labels=2):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, num_labels)

    def forward(self, sequence_output, pooled_output, task_idx=None):
        prediction_scores = self.predictions(sequence_output, task_idx)
        # The sentence-level score is skipped when no pooled output is given.
        if pooled_output is None:
            return prediction_scores, None
        return prediction_scores, self.seq_relationship(pooled_output)
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding weights: normal(0, initializer_range); LayerNorm:
        weight 1 / bias 0; Linear biases: 0. Intended for use via
        ``self.apply(self.init_bert_weights)``.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # module.weight.data.copy_(torch.Tensor(
            #     truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        logger.info("Model config {}".format(config))
        # clean the arguments in kwargs
        # These config-level keys must not be forwarded to the model class
        # constructor, which only accepts its own specific arguments.
        for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
                          'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
                          'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
                          'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
            if arg_clean in kwargs:
                del kwargs[arg_clean]

        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
            state_dict = torch.load(weights_path)

        # Rename legacy TF-style parameter names (gamma/beta -> weight/bias).
        # Collect renames first, then apply, to avoid mutating while iterating.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        # Recursive manual load (instead of load_state_dict) so missing and
        # unexpected keys can be collected without raising.
        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(
                prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Checkpoints saved from a wrapper carry a 'bert.' prefix; add it when
        # loading directly into a bare BertModel.
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        model.missing_keys = missing_keys
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            logger.info('\n'.join(error_msgs))
        return model
class BertModel(PreTrainedBertModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    """

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        self.apply(self.init_bert_weights)

    def rescale_some_parameters(self):
        """Depth-scaled re-initialization: divide each layer's two residual
        output projections by sqrt(2 * layer_depth)."""
        for layer_id, layer in enumerate(self.encoder.layer):
            layer.attention.output.dense.weight.data.div_(
                math.sqrt(2.0 * (layer_id + 1)))
            layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))

    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Convert a 2D or 3D 0/1 attention mask into the additive form
        (0.0 for attend, -10000.0 for masked) broadcastable over heads."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
        """Run embeddings -> encoder -> pooler; see the class docstring for
        input/output shapes."""
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      mask_qkv=mask_qkv, seg_ids=token_type_ids,
                                      key_history=key_history, value_history=value_history)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Return just the last layer's tensor rather than a list.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertModelIncr(BertModel):
    """BertModel variant for incremental (step-by-step) decoding.

    Adds an optional learned relative-position bias (one-hot over
    ``config.rel_pos_bins`` buckets mapped to per-head scalars) and also
    returns the embedding output so callers can cache states between steps.
    """

    def __init__(self, config):
        super(BertModelIncr, self).__init__(config)
        if self.config.rel_pos_bins > 0:
            # Linear map from bucket one-hot to a bias per attention head.
            self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
        # ``rel_pos``, when given, holds precomputed relative-position bucket
        # indices; they are one-hot encoded and projected to per-head biases
        # of shape (batch, head, pos, pos) for the attention scores.
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids, task_idx=task_idx)
        if self.rel_pos_bias is not None:
            # print("Rel pos size = %s" % str(rel_pos.size()))
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
            # print("Rel pos size = %s" % str(rel_pos.size()))
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
            # print("Rel pos size = %s" % str(rel_pos.size()))
        else:
            rel_pos = None
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      prev_embedding=prev_embedding,
                                      prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
                                      seg_ids=token_type_ids, rel_pos=rel_pos)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        # Unlike BertModel, the raw embedding output is returned too so the
        # caller can maintain the incremental-decoding cache.
        return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, mask_qkv=None, task_idx=None):
        """Pre-training forward pass.

        Returns the summed MLM + NSP loss when *both* label tensors are given;
        otherwise returns the raw (mlm_logits, nsp_logits) pair.
        """
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False, mask_qkv=mask_qkv,
                                                   task_idx=task_idx)
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output)
        if masked_lm_labels is not None and next_sentence_label is not None:
            # ignore_index=-1: positions labeled -1 do not contribute to the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(
                prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(
                seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            return total_loss
        else:
            return prediction_scores, seq_relationship_score
class BertPreTrainingPairTransform(nn.Module):
    """Project a concatenated pair of hidden vectors back to ``hidden_size``.

    Applies a linear layer over ``cat([pair_x, pair_y])`` followed by the
    configured activation. ``config.hidden_act`` may be either the name of an
    activation in ``ACT2FN`` or a callable.
    """

    def __init__(self, config):
        super(BertPreTrainingPairTransform, self).__init__()
        # 2*hidden -> hidden projection over the concatenated pair.
        self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        # NOTE: a trailing LayerNorm used to follow the activation here but is
        # intentionally disabled.

    def forward(self, pair_x, pair_y):
        joined = torch.cat([pair_x, pair_y], dim=-1)
        return self.transform_act_fn(self.dense(joined))
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map signed relative positions to bucket indices.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

    Half of the buckets cover exact small offsets one-to-one; the other half
    cover logarithmically growing ranges up to ``max_distance`` (anything
    farther lands in the last bucket). With ``bidirectional=True`` the bucket
    space is split between positive and negative offsets.
    """
    bucket_offset = 0
    if bidirectional:
        num_buckets //= 2
        # Positive offsets use the upper half of the bucket range.
        bucket_offset = (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        # Unidirectional: only look-back distances count; clamp to >= 0.
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # distance is now in [0, inf). Small distances get one bucket each.
    max_exact = num_buckets // 2
    is_small = distance < max_exact
    # Larger distances share log-spaced buckets, clamped to the last bucket.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))
    return bucket_offset + torch.where(is_small, distance, log_bucket)
class BertForSeq2SeqDecoder(PreTrainedBertModel):
    """Incremental seq2seq decoder built on the pre-trained BERT encoder.

    Supports greedy decoding (``search_beam_size == 1``) and beam search,
    with optional duplicate-n-gram blocking and a minimum output length.
    """

    def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
                 pos_shift=False):
        # NOTE(review): num_rel is accepted for signature compatibility but is
        # not used anywhere in this class.
        super(BertForSeq2SeqDecoder, self).__init__(config)
        # Incremental encoder variant that can consume cached previous states.
        self.bert = BertModelIncr(config)
        # LM head tied to the encoder's word-embedding matrix.
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
        self.apply(self.init_bert_weights)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        # Token id inserted at the position being predicted.
        self.mask_word_id = mask_word_id
        self.num_labels = num_labels
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        # Optional duplicate-n-gram blocking during beam search.
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        assert mode in ("s2s", "l2r")
        self.mode = mode
        # pos_shift=True uses the shifted-position decoding scheme (feed the
        # previously generated token instead of a [MASK] placeholder).
        self.pos_shift = pos_shift
    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
        """Greedy incremental decoding; delegates to ``beam_search`` when
        ``search_beam_size > 1``.

        Decodes from position ``input_ids.size(1)`` up to
        ``token_type_ids.size(1)`` total positions, caching encoder states
        between steps, and returns the generated ids as a
        ``[batch, output_length - input_length]`` tensor.
        """
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
                                    task_idx=task_idx, mask_qkv=mask_qkv)

        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        # Cached embeddings / per-layer states from previous decode steps.
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        # One [MASK] placeholder per example, appended at the predict position.
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)

        # Precompute relative-position buckets once for the full sequence.
        if self.bert.rel_pos_bias is not None:
            rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
        else:
            rel_pos = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]
            if self.pos_shift:
                # Shifted scheme: feed the last generated token itself; the
                # very first step additionally appends a SEP token.
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                # Mask scheme: append a [MASK] token at the predict position.
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            # Slice the precomputed full-length inputs to the current window.
            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            if rel_pos is not None:
                cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
            else:
                cur_rel_pos = None
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)

            # Greedy pick at the last (predicted) position.
            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)

            # Extend the caches. In the mask scheme the final position held a
            # [MASK] placeholder, so its state is dropped (the ``[:, :-1, :]``
            # slices); in the shifted scheme every position is kept.
            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = new_embedding
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
            else:
                if prev_embedding is None:
                    prev_embedding = new_embedding[:, :-1, :]
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x[:, :-1, :]
                                           for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1
        return torch.cat(output_ids, dim=1)
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
# print("Rel pos size = %s" % str(rel_pos.size()))
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos - input_length + 1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.div(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
if rel_pos is not None:
rel_pos = first_expand(rel_pos)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[i, :length, ...] = tensor
return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
| 67,538 | 44.944898 | 139 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/config.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers import BertConfig
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
logger = logging.getLogger(__name__)
class TuringNLRv3ForSeq2SeqConfig(BertConfig):
    """BertConfig extended with seq2seq fine-tuning knobs: label smoothing,
    segment ids for the source/target halves, relative-position buckets and
    an option to freeze the word embeddings."""

    def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1,
                 rel_pos_bins=0, max_rel_pos=0, fix_word_embedding=False, **kwargs):
        super(TuringNLRv3ForSeq2SeqConfig, self).__init__(**kwargs)
        self.label_smoothing = label_smoothing
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        self.max_rel_pos = max_rel_pos
        self.rel_pos_bins = rel_pos_bins
        self.fix_word_embedding = fix_word_embedding

    @classmethod
    def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None, fix_word_embedding=False):
        """Clone an existing Bert-style config into a seq2seq config,
        optionally enlarging ``max_position_embeddings``."""
        required = (
            "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
            "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
            "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps",
        )
        kwargs = {}
        for name in required:
            assert hasattr(config, name)
            kwargs[name] = getattr(config, name)
        # Legacy alias expected by older config constructors.
        kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
        # Optional fields: copied only when the source config defines them.
        for name in ("source_type_id", "target_type_id", "rel_pos_bins", "max_rel_pos"):
            if hasattr(config, name):
                kwargs[name] = getattr(config, name)
        if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
            kwargs["max_position_embeddings"] = max_position_embeddings
            logger.info(" ** Change max position embeddings to %d ** " %
                        max_position_embeddings)
        return cls(label_smoothing=label_smoothing, fix_word_embedding=fix_word_embedding, **kwargs)
| 2,104 | 41.959184 | 116 | py |
Tiny-NewsRec | Tiny-NewsRec-main/Tiny-NewsRec/tnlrv3/configuration_tnlrv3.py | # coding=utf-8
""" TuringNLRv3 model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
# Shortcut-name map for hosted pretrained configs; currently empty (no
# entries are published for this model).
TuringNLRv3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
}
class TuringNLRv3Config(PretrainedConfig):
    r"""
    :class:`~transformers.TuringNLRv3Config` is the configuration class to store the configuration of a
    `TuringNLRv3Model`.

    Arguments:
        vocab_size: either the vocabulary size (int) of `inputs_ids` in
            `TuringNLRv3Model`, or a path (str) to a JSON config file whose
            key/value pairs are loaded directly onto this instance.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probabilitiy for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `TuringNLRv3Model`.
        initializer_range: The sttdev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
        source_type_id: segment id assigned to source-side tokens.
        target_type_id: segment id assigned to target-side tokens.
    """
    pretrained_config_archive_map = TuringNLRv3_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size=28996,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=6,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 source_type_id=0,
                 target_type_id=1,
                 **kwargs):
        super(TuringNLRv3Config, self).__init__(**kwargs)
        # String (or Python-2 ``unicode``) means "path to a JSON config file":
        # every key in the file is copied straight onto this instance.
        if isinstance(vocab_size, str) or (sys.version_info[0] == 2
                                           and isinstance(vocab_size, unicode)):
            with open(vocab_size, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size, int):
            # Plain int: take all hyperparameters from the keyword arguments.
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
| 4,136 | 45.483146 | 107 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/dataloader.py | import sys
import traceback
import logging
import random
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import torch
from torch.utils.data import IterableDataset
from streaming import StreamSampler
class DataLoaderTrain(IterableDataset):
    """Streaming training dataset: reads impression-log batches from a
    StreamSampler, builds padded click-history plus candidate-news features,
    optionally on GPU, with an optional background prefetch thread."""

    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_combined,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        self.data_dir = data_dir
        self.filename_pat = filename_pat
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size
        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx
        # Created lazily by start()/_produce(); None means "not started yet".
        self.sampler = None
        self.shuffle_buffer_size = args.shuffle_buffer_size
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        # Incremented on every (re)start; doubles as the shuffle seed.
        self.epoch = -1
        self.news_combined = news_combined
        self.news_index = news_index
        self.word_dict = word_dict
    def start(self):
        """Synchronously create the StreamSampler for a new epoch (the
        non-prefetch path used when ``enable_prefetch`` is False)."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_buffer_size=self.shuffle_buffer_size,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()
def trans_to_nindex(self, nids):
return [self.news_index[i] if i in self.news_index else 0 for i in nids]
def pad_to_fix_len(self, x, fix_length, padding_front=True, padding_value=0):
if padding_front:
pad_x = [padding_value] * (fix_length-len(x)) + x[-fix_length:]
mask = [0] * (fix_length-len(x)) + [1] * min(fix_length, len(x))
else:
pad_x = x[-fix_length:] + [padding_value]*(fix_length-len(x))
mask = [1] * min(fix_length, len(x)) + [0] * (fix_length-len(x))
return pad_x, mask
    def _produce(self):
        """Prefetch-thread body: stream batches, process them and push the
        results onto ``self.outputs`` until exhausted or stopped.

        NOTE(review): unlike start(), this path does not pass
        shuffle_buffer_size to StreamSampler — confirm whether that is
        intentional.
        """
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
        except:
            # Surface the worker-thread traceback and tear the pool down so
            # the failure is not silently swallowed.
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise
    def start_async(self):
        """Launch the single-thread prefetch worker feeding ``self.outputs``
        (a bounded queue, so production backpressures at 10 batches)."""
        self.aval_count = 0
        self.stopped = False
        self.outputs = Queue(10)
        self.pool = ThreadPoolExecutor(1)
        self.pool.submit(self._produce)
    def _process(self, batch):
        """Decode one raw byte batch into training tensors.

        Each line is tab-separated; field 3 is the space-separated click
        history, field 4 the positive candidate(s), field 5 the negatives.
        The positive is inserted at a random slot among the negatives and the
        slot index becomes the classification label.
        """
        batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
        user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
        for line in batch:
            click_docs = line[3].split()
            sess_pos = line[4].split()
            sess_neg = line[5].split()
            # Front-padded history of fixed length, plus its validity mask.
            click_docs, log_mask = self.pad_to_fix_len(
                self.trans_to_nindex(click_docs), self.user_log_length)
            user_feature = self.news_combined[click_docs]
            pos = self.trans_to_nindex(sess_pos)
            neg = self.trans_to_nindex(sess_neg)
            # Random slot for the positive among npratio negatives.
            label = random.randint(0, self.npratio)
            sample_news = neg[:label] + pos + neg[label:]
            news_feature = self.news_combined[sample_news]
            user_feature_batch.append(user_feature)
            log_mask_batch.append(log_mask)
            news_feature_batch.append(news_feature)
            label_batch.append(label)
        if self.enable_gpu:
            user_feature_batch = torch.LongTensor(user_feature_batch).cuda()
            log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
            news_feature_batch = torch.LongTensor(news_feature_batch).cuda()
            label_batch = torch.LongTensor(label_batch).cuda()
        else:
            user_feature_batch = torch.LongTensor(user_feature_batch)
            log_mask_batch = torch.FloatTensor(log_mask_batch)
            news_feature_batch = torch.LongTensor(news_feature_batch)
            label_batch = torch.LongTensor(label_batch)
        return user_feature_batch, log_mask_batch, news_feature_batch, label_batch
    def __iter__(self):
        """Implement IterableDataset method to provide data iterator."""
        logging.info("DataLoader __iter__()")
        if self.enable_prefetch:
            # Tear down any previous prefetch run before starting a new one.
            self.join()
            self.start_async()
        else:
            self.start()
        return self
    def __next__(self):
        """Return the next processed batch, from the prefetch queue or by
        processing inline; raises StopIteration once the stream is drained."""
        if self.sampler and self.sampler.reach_end() and self.aval_count == 0:
            raise StopIteration
        if self.enable_prefetch:
            next_batch = self.outputs.get()
            self.outputs.task_done()
            self.aval_count -= 1
        else:
            next_batch = self._process(self.sampler.__next__())
        return next_batch
    def join(self):
        """Stop the prefetch worker (if any), drain the queue so the producer
        is not blocked on a full queue, and release the sampler."""
        self.stopped = True
        if self.sampler:
            if self.enable_prefetch:
                while self.outputs.qsize() > 0:
                    self.outputs.get()
                    self.outputs.task_done()
                self.outputs.join()
                self.pool.shutdown(wait=True)
                logging.info("shut down pool.")
            self.sampler = None
class DataLoaderTest(DataLoaderTrain):
    """Evaluation variant of DataLoaderTrain: consumes precomputed news
    vectors (``news_scoring``) instead of raw token features, and reads
    "newsid-label" impression pairs instead of separate pos/neg fields."""

    def __init__(self,
                 data_dir,
                 filename_pat,
                 args,
                 world_size,
                 worker_rank,
                 cuda_device_idx,
                 news_index,
                 news_scoring,
                 word_dict,
                 enable_prefetch=True,
                 enable_shuffle=False,
                 enable_gpu=True):
        # Full override of the parent constructor (no super().__init__):
        # note it stores news_scoring and omits shuffle_buffer_size.
        self.data_dir = data_dir
        self.filename_pat = filename_pat
        self.npratio = args.npratio
        self.user_log_length = args.user_log_length
        self.batch_size = args.batch_size
        self.worker_rank = worker_rank
        self.world_size = world_size
        self.cuda_device_idx = cuda_device_idx
        # Created lazily by start()/_produce(); None means "not started yet".
        self.sampler = None
        self.enable_prefetch = enable_prefetch
        self.enable_shuffle = enable_shuffle
        self.enable_gpu = enable_gpu
        # Incremented on every (re)start; doubles as the shuffle seed.
        self.epoch = -1
        self.news_scoring = news_scoring
        self.news_index = news_index
        self.word_dict = word_dict
    def start(self):
        """Synchronously create the StreamSampler for a new epoch (test
        variant: no shuffle buffer size is configured)."""
        self.epoch += 1
        self.sampler = StreamSampler(
            data_dir=self.data_dir,
            filename_pat=self.filename_pat,
            batch_size=self.batch_size,
            worker_rank=self.worker_rank,
            world_size=self.world_size,
            enable_shuffle=self.enable_shuffle,
            shuffle_seed=self.epoch,  # epoch id as shuffle random seed
        )
        self.sampler.__iter__()
    def _produce(self):
        """Prefetch-thread body for evaluation: stream batches, process them
        and push results onto ``self.outputs`` until exhausted or stopped."""
        # need to reset cuda device in produce thread.
        if self.enable_gpu:
            torch.cuda.set_device(self.cuda_device_idx)
        try:
            self.epoch += 1
            self.sampler = StreamSampler(
                data_dir=self.data_dir,
                filename_pat=self.filename_pat,
                batch_size=self.batch_size,
                worker_rank=self.worker_rank,
                world_size=self.world_size,
                enable_shuffle=self.enable_shuffle,
                shuffle_seed=self.epoch,  # epoch id as shuffle random seed
            )
            # t0 = time.time()
            for batch in self.sampler:
                if self.stopped:
                    break
                context = self._process(batch)
                self.outputs.put(context)
                self.aval_count += 1
                # logging.info(f"_produce cost:{time.time()-t0}")
                # t0 = time.time()
        except:
            # Surface the worker-thread traceback and tear the pool down so
            # the failure is not silently swallowed.
            traceback.print_exc(file=sys.stdout)
            self.pool.shutdown(wait=False)
            raise
def _process(self, batch):
batch_size = len(batch)
batch = [x.decode(encoding="utf-8").split("\t") for x in batch]
user_feature_batch, log_mask_batch, news_feature_batch, label_batch = [], [], [], []
for line in batch:
click_docs = line[3].split()
click_docs, log_mask = self.pad_to_fix_len(
self.trans_to_nindex(click_docs), self.user_log_length)
user_feature = self.news_scoring[click_docs]
sample_news = self.trans_to_nindex(
[i.split('-')[0] for i in line[4].split()])
labels = [int(i.split('-')[1]) for i in line[4].split()]
news_feature = self.news_scoring[sample_news]
user_feature_batch.append(user_feature)
log_mask_batch.append(log_mask)
news_feature_batch.append(news_feature)
label_batch.append(np.array(labels))
if self.enable_gpu:
user_feature_batch = torch.FloatTensor(user_feature_batch).cuda()
log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
else:
user_feature_batch = torch.FloatTensor(user_feature_batch)
log_mask_batch = torch.FloatTensor(log_mask_batch)
return user_feature_batch, log_mask_batch, news_feature_batch, label_batch
| 10,287 | 34.84669 | 92 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/utils.py | import logging
import os
import sys
import torch
import numpy as np
import argparse
import re
from tnlrv3.modeling import TuringNLRv3ForSequenceClassification
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
from tnlrv3.tokenization_tnlrv3 import TuringNLRv3Tokenizer
from transformers import BertTokenizer, BertConfig, BertModel
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
# Maps a model-type name to its (config class, model class, tokenizer class)
# triple used to instantiate the text encoder.
MODEL_CLASSES = {
    'tnlrv3': (TuringNLRv3Config, TuringNLRv3ForSequenceClassification, TuringNLRv3Tokenizer),
    'bert': (BertConfig, BertModel, BertTokenizer),
    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)
}
def word_tokenize(sent):
    """Lower-case ``sent`` and split it into word and punctuation tokens.

    Non-string inputs (e.g. NaN from a pandas column) yield an empty list.
    """
    if not isinstance(sent, str):
        return []
    return re.findall(r'[\w]+|[.,!?;|]', sent.lower())
def str2bool(v):
    """argparse-friendly bool parser: passes bools through and accepts the
    usual yes/no spellings; anything else raises ArgumentTypeError."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def init_hvd_cuda(enable_hvd=True, enable_gpu=True):
    """Initialize Horovod (if enabled) and pin this process to its local GPU.

    Returns ``(world_size, rank, local_rank)``; all three fall back to the
    single-process values (1, 0, 0) when Horovod is disabled.
    """
    hvd = None
    if enable_hvd:
        # Imported lazily so the module works without horovod installed.
        import horovod.torch as hvd
        hvd.init()
        logging.info(
            f"hvd_size:{hvd.size()}, hvd_rank:{hvd.rank()}, hvd_local_rank:{hvd.local_rank()}"
        )
    hvd_size = hvd.size() if enable_hvd else 1
    hvd_rank = hvd.rank() if enable_hvd else 0
    hvd_local_rank = hvd.local_rank() if enable_hvd else 0
    if enable_gpu:
        # One GPU per local rank.
        torch.cuda.set_device(hvd_local_rank)
    return hvd_size, hvd_rank, hvd_local_rank
def setuplogger():
    """Configure the root logger to emit INFO-level records to stdout with a
    ``[LEVEL timestamp] message`` format."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter("[%(levelname)s %(asctime)s] %(message)s"))
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)
def dump_args(args):
    """Log every public attribute of ``args`` as ``args[name]=value``."""
    for name in dir(args):
        if name.startswith("_"):
            continue
        logging.info(f"args[{name}]={getattr(args, name)}")
def acc(y_true, y_hat):
    """Fraction of rows where ``argmax(y_hat)`` equals ``y_true``, returned
    as a float tensor."""
    preds = torch.argmax(y_hat, dim=-1)
    n_correct = torch.sum(y_true == preds)
    return n_correct.data.float() * 1.0 / y_true.shape[0]
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain of the top-``k`` ranking induced by
    ``y_score`` (gains ``2**rel - 1``, log2 position discounts)."""
    top_k = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, top_k)
    gains = 2 ** relevance - 1
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gains / discounts)


def ndcg_score(y_true, y_score, k=10):
    """DCG of the predicted ranking normalized by the ideal DCG."""
    return dcg_score(y_true, y_score, k) / dcg_score(y_true, y_true, k)
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of the relevant items under the ranking induced
    by ``y_score`` (reciprocal-rank mass divided by the number of relevant
    items)."""
    ranked_truth = np.take(y_true, np.argsort(y_score)[::-1])
    reciprocal = ranked_truth / (np.arange(len(ranked_truth)) + 1)
    return np.sum(reciprocal) / np.sum(y_true)
def load_matrix(embedding_file_path, word_dict, word_embedding_dim):
    """Load pretrained word vectors for the words in ``word_dict``.

    Parameters:
        embedding_file_path: whitespace-separated "word v1 v2 ..." file
            (read as bytes), or None to get an all-zero matrix.
        word_dict: word -> row-index mapping (indices start at 1; row 0 is
            reserved for padding).
        word_embedding_dim: vector dimensionality.

    Returns ``(embedding_matrix, have_word)``: a ``(len(word_dict)+1, dim)``
    matrix with rows of missing words left as zeros, plus the list of words
    that were found in the file.
    """
    embedding_matrix = np.zeros(shape=(len(word_dict) + 1,
                                       word_embedding_dim))
    have_word = []
    if embedding_file_path is not None:
        with open(embedding_file_path, 'rb') as f:
            while True:
                line = f.readline()
                if len(line) == 0:
                    break
                line = line.split()
                if not line:
                    # Skip blank lines; previously `line[0]` raised IndexError.
                    continue
                word = line[0].decode()
                if word in word_dict:
                    index = word_dict[word]
                    tp = [float(x) for x in line[1:]]
                    embedding_matrix[index] = np.array(tp)
                    have_word.append(word)
    return embedding_matrix, have_word
def latest_checkpoint(directory):
    """Return the checkpoint path with the highest step/epoch number, or None.

    Filenames are expected to look like ``...-<number>.<ext>`` (the number is
    parsed from the segment before the final extension). Returns None when
    the directory is missing or empty.
    """
    if not os.path.exists(directory):
        return None
    checkpoints = {}
    for fname in os.listdir(directory):
        step = int(fname.split('.')[-2].split('-')[-1])
        checkpoints[step] = fname
    if not checkpoints:
        return None
    return os.path.join(directory, checkpoints[max(checkpoints)])
def get_checkpoint(directory, ckpt_name):
    """Return the full path of ``ckpt_name`` inside ``directory`` if that
    file exists, otherwise None."""
    ckpt_path = os.path.join(directory, ckpt_name)
    return ckpt_path if os.path.exists(ckpt_path) else None
| 4,173 | 27.589041 | 94 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/run.py | import numpy as np
import torch
import logging
from tqdm.auto import tqdm
import torch.optim as optim
import utils
import os
from pathlib import Path
import random
from dataloader import DataLoaderTrain, DataLoaderTest
from torch.utils.data import Dataset, DataLoader
from streaming import get_stat, get_worker_files
from parameters import parse_args
from preprocess import read_news_bert, get_doc_input_bert
from model_bert import ModelBert
def train(args):
    """Train the PLM-based news recommendation model.

    Reads behavior logs and news from ``args.train_data_dir``, builds the
    BERT-backed two-tower model, optionally warm-starts it from a pretrain
    checkpoint (``args.use_pretrain_model``), freezes all transformer layers
    except ``args.bert_trainable_layer``, then runs up to ``args.epochs``
    epochs of negative-sampling training under (optional) Horovod data
    parallelism. Rank 0 saves one ``epoch-N.pt`` checkpoint per epoch.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    # Per-file line counts; used below only to log this worker's sample/step count.
    stat = get_stat(args.train_data_dir, args.filename_pat)
    print(stat)
    data_paths = get_worker_files(args.train_data_dir,
                                  hvd_rank, hvd_size, args.filename_pat, args.enable_shuffle, 0
                                  )
    sample_num = 0
    for file in data_paths:
        sample_num += stat[file]
    logging.info("[{}] contains {} samples {} steps".format(
        hvd_rank, sample_num, sample_num // args.batch_size))
    news, news_index, category_dict, subcategory_dict = read_news_bert(
        os.path.join(args.train_data_dir, 'news.tsv'), args, mode='train'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # Token ids and attention mask are packed side by side into one array;
    # NewsEncoder.forward splits them back in half.
    news_combined = np.concatenate([news_title, news_title_attmask], axis=-1)
    model = ModelBert(args)
    if args.use_pretrain_model:
        # Warm start: copy every 'student.*' tensor from the distillation
        # checkpoint into this model's state dict, tracking coverage.
        # NOTE(review): keys are used verbatim, so the model's own state-dict
        # keys are expected to start with 'student' too — confirm upstream.
        ckpt = torch.load(args.pretrain_model_path, map_location='cpu')
        pretrained_dict = ckpt["model_state_dict"]
        model_dict = model.state_dict()
        remain_key = list(model_dict.keys())
        pretrained_key = []
        for k, v in pretrained_dict.items():
            if not k.startswith('student'):
                continue
            key = k
            model_dict[key].copy_(v)
            pretrained_key.append(key)
            remain_key.remove(key)
        model.load_state_dict(model_dict)
        if hvd_rank == 0:
            logging.info(f"loaded pretrain model: {args.pretrain_model_path}")
            print(f'{len(pretrained_key)} loaded pretrained parameters:')
            for k in pretrained_key:
                print(f'\t{k}')
            print(f'{len(remain_key)} randomly initialized parameters:')
            for k in remain_key:
                print(f'\t{k}')
        del ckpt
        torch.cuda.empty_cache()
        # Freeze the whole transformer, then re-enable only selected layers.
        for param in model.news_encoder.bert_model.parameters():
            param.requires_grad = False
        for index, layer in enumerate(model.news_encoder.bert_model.bert.encoder.layer):
            if index in args.bert_trainable_layer:
                logging.info(f"finetune block {index}")
                for param in layer.parameters():
                    param.requires_grad = True
        if args.enable_gpu:
            model = model.cuda()
        # Two learning rates: a small one for warm-started (pretrained)
        # parameters, the regular one for freshly initialized parameters.
        pretrained_param = []
        rest_param = []
        for name, param in model.named_parameters():
            if name in pretrained_key:
                pretrained_param.append(param)
            else:
                rest_param.append(param)
        optimizer = torch.optim.Adam([
            {'params': pretrained_param, 'lr': args.pretrain_lr},
            {'params': rest_param, 'lr': args.lr}], amsgrad=True)
    else:
        # Same freezing scheme, but the encoder attribute path differs by
        # backbone type (tnlrv3 wraps the encoder under `.bert`).
        if args.model_type == 'tnlrv3':
            for param in model.news_encoder.bert_model.parameters():
                param.requires_grad = False
            for index, layer in enumerate(model.news_encoder.bert_model.bert.encoder.layer):
                if index in args.bert_trainable_layer:
                    logging.info(f"finetune block {index}")
                    for param in layer.parameters():
                        param.requires_grad = True
        else:
            for param in model.news_encoder.bert_model.parameters():
                param.requires_grad = False
            for index, layer in enumerate(model.news_encoder.bert_model.encoder.layer):
                if index in args.bert_trainable_layer:
                    logging.info(f"finetune block {index}")
                    for param in layer.parameters():
                        param.requires_grad = True
        if args.enable_gpu:
            model = model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=args.lr, amsgrad=True)
    word_dict = None
    if args.load_ckpt_name is not None:
        # Resume from an explicit checkpoint (overrides the warm start above).
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
        checkpoint = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        logging.info(f"Model loaded from {ckpt_path}")
    if hvd_rank == 0:
        print(model)
        for name, param in model.named_parameters():
            print(name, param.requires_grad)
    if args.enable_hvd:
        # Sync initial weights across workers, then wrap the optimizer for
        # averaged gradient all-reduce.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        compression = hvd.Compression.none
        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression,
            op=hvd.Average)
    dataloader = DataLoaderTrain(
        news_index=news_index,
        news_combined=news_combined,
        word_dict=word_dict,
        data_dir=args.train_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=True,
        enable_gpu=args.enable_gpu,
    )
    logging.info('Training...')
    for ep in range(args.start_epoch, args.epochs):
        loss = 0.0
        accuary = 0.0
        for cnt, (log_ids, log_mask, input_ids, targets) in enumerate(dataloader):
            if cnt > args.max_steps_per_epoch:
                break
            bz_loss, y_hat = model(log_ids, log_mask, input_ids, targets)
            loss += bz_loss.data.float()
            accuary += utils.acc(targets, y_hat)
            optimizer.zero_grad()
            bz_loss.backward()
            optimizer.step()
            # NOTE(review): at cnt == 0 this logs loss/0 and acc/0 (inf/nan
            # for float tensors) — harmless but noisy; confirm intended.
            if cnt % args.log_steps == 0:
                logging.info(
                    '[{}] Ed: {}, train_loss: {:.5f}, acc: {:.5f}'.format(
                        hvd_rank, cnt * args.batch_size, loss.data / cnt,
                        accuary / cnt))
        loss /= cnt
        print(ep + 1, loss)
        # save model last of epoch
        if hvd_rank == 0:
            ckpt_path = os.path.join(args.model_dir, f'epoch-{ep+1}.pt')
            torch.save(
                {
                    'model_state_dict': model.state_dict(),
                    'category_dict': category_dict,
                    'word_dict': word_dict,
                    'subcategory_dict': subcategory_dict
                }, ckpt_path)
            logging.info(f"Model saved to {ckpt_path}")
    dataloader.join()
def test(args):
    """Evaluate a trained checkpoint on the test split.

    Loads the latest (or named) checkpoint, pre-computes a vector for every
    news article, reports an average random-pair cosine similarity
    ("doc-sim"), then streams impressions and accumulates AUC / MRR /
    nDCG@5 / nDCG@10, all-reduced across Horovod workers at the end.
    """
    if args.enable_hvd:
        import horovod.torch as hvd
    hvd_size, hvd_rank, hvd_local_rank = utils.init_hvd_cuda(
        args.enable_hvd, args.enable_gpu)
    if args.load_ckpt_name is not None:
        ckpt_path = utils.get_checkpoint(args.model_dir, args.load_ckpt_name)
    else:
        ckpt_path = utils.latest_checkpoint(args.model_dir)
    assert ckpt_path is not None, 'No ckpt found'
    checkpoint = torch.load(ckpt_path)
    subcategory_dict = checkpoint['subcategory_dict']
    category_dict = checkpoint['category_dict']
    word_dict = checkpoint['word_dict']
    model = ModelBert(args)
    if args.enable_gpu:
        model.cuda()
    model.load_state_dict(checkpoint['model_state_dict'])
    logging.info(f"Model loaded from {ckpt_path}")
    if args.enable_hvd:
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    model.eval()
    torch.set_grad_enabled(False)
    news, news_index = read_news_bert(
        os.path.join(args.test_data_dir, 'news.tsv'), args, mode='test'
    )
    news_title, news_title_attmask, news_category, news_subcategory = get_doc_input_bert(
        news, news_index, category_dict, subcategory_dict, args)
    # ids and attention mask side by side, as in train().
    news_combined = np.concatenate([news_title, news_title_attmask], axis=1)
    class NewsDataset(Dataset):
        # Wraps the packed numpy array so DataLoader can batch it.
        def __init__(self, data):
            self.data = data
        def __getitem__(self, idx):
            return self.data[idx]
        def __len__(self):
            return self.data.shape[0]
    def news_collate_fn(arr):
        arr = torch.LongTensor(arr)
        return arr
    news_dataset = NewsDataset(news_combined)
    news_dataloader = DataLoader(news_dataset,
                                 batch_size=args.batch_size * 4,
                                 num_workers=args.num_workers,
                                 collate_fn=news_collate_fn)
    # Pre-compute one embedding per news article.
    news_scoring = []
    with torch.no_grad():
        for input_ids in tqdm(news_dataloader):
            input_ids = input_ids.cuda()
            news_vec = model.news_encoder(input_ids)
            news_vec = news_vec.to(torch.device("cpu")).detach().numpy()
            news_scoring.extend(news_vec)
    news_scoring = np.array(news_scoring)
    logging.info("news scoring num: {}".format(news_scoring.shape[0]))
    # Average cosine similarity of 1M random article pairs (row 0, the
    # padding row, is excluded by starting randrange at 1).
    doc_sim = 0
    for _ in tqdm(range(1000000)):
        i = random.randrange(1, len(news_scoring))
        j = random.randrange(1, len(news_scoring))
        if i != j:
            doc_sim += np.dot(news_scoring[i], news_scoring[j]) / (
                np.linalg.norm(news_scoring[i]) * np.linalg.norm(news_scoring[j]))
    print(f'=== doc-sim: {doc_sim / 1000000} ===')
    dataloader = DataLoaderTest(
        news_index=news_index,
        news_scoring=news_scoring,
        word_dict=word_dict,
        data_dir=args.test_data_dir,
        filename_pat=args.filename_pat,
        args=args,
        world_size=hvd_size,
        worker_rank=hvd_rank,
        cuda_device_idx=hvd_local_rank,
        enable_prefetch=True,
        enable_shuffle=False,
        enable_gpu=args.enable_gpu,
    )
    from metrics import roc_auc_score, ndcg_score, mrr_score
    AUC = []
    MRR = []
    nDCG5 = []
    nDCG10 = []
    def print_metrics(hvd_local_rank, cnt, x):
        logging.info("[{}] Ed: {}: {}".format(hvd_local_rank, cnt,
                     '\t'.join(["{:0.2f}".format(i * 100) for i in x])))
    def get_mean(arr):
        return [np.array(i).mean() for i in arr]
    def get_sum(arr):
        return [np.array(i).sum() for i in arr]
    local_sample_num = 0
    for cnt, (log_vecs, log_mask, news_vecs, labels) in enumerate(dataloader):
        local_sample_num += log_vecs.shape[0]
        user_vecs = model.user_encoder(log_vecs, log_mask).to(
            torch.device("cpu")).detach().numpy()
        for user_vec, news_vec, label in zip(user_vecs, news_vecs, labels):
            # Skip impressions that are all-negative or all-positive:
            # ranking metrics are undefined for them.
            if label.mean() == 0 or label.mean() == 1:
                continue
            score = np.dot(news_vec, user_vec)
            auc = roc_auc_score(label, score)
            mrr = mrr_score(label, score)
            ndcg5 = ndcg_score(label, score, k=5)
            ndcg10 = ndcg_score(label, score, k=10)
            AUC.append(auc)
            MRR.append(mrr)
            nDCG5.append(ndcg5)
            nDCG10.append(ndcg10)
        if cnt % args.log_steps == 0:
            print_metrics(hvd_rank, local_sample_num,
                          get_mean([AUC, MRR, nDCG5, nDCG10]))
    # stop scoring
    dataloader.join()
    logging.info('[{}] local_sample_num: {}'.format(
        hvd_rank, local_sample_num))
    # Aggregate sums (not means) across workers, then normalize once.
    total_sample_num = hvd.allreduce(
        torch.tensor(local_sample_num), op=hvd.Sum)
    local_metrics_sum = get_sum([AUC, MRR, nDCG5, nDCG10])
    total_metrics_sum = hvd.allreduce(torch.tensor(
        local_metrics_sum, dtype=float), op=hvd.Sum)
    if hvd_rank == 0:
        print_metrics(hvd_rank, total_sample_num,
                      total_metrics_sum / total_sample_num)
if __name__ == "__main__":
    utils.setuplogger()
    args = parse_args()
    Path(args.model_dir).mkdir(parents=True, exist_ok=True)
    # args.mode is matched by substring, so a value like "train_test"
    # runs training first and evaluation afterwards.
    if 'train' in args.mode:
        train(args)
    if 'test' in args.mode:
        test(args)
| 12,540 | 32.265252 | 97 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/streaming.py | import os
import logging
import fnmatch
import random
import numpy as np
import tensorflow as tf
import subprocess
def get_stat(dirname, filename_pat="*"):
    """Map each file under *dirname* matching *filename_pat* to its line count.

    Line counts come from shelling out to ``wc -l``. Returns ``None`` (with a
    warning) when the directory does not exist.
    """
    if not tf.io.gfile.exists(dirname):
        logging.warning(f"{dirname} does not exist!")
        return None
    line_counts = {}
    matching = (name for name in tf.io.gfile.listdir(dirname)
                if fnmatch.fnmatch(name, filename_pat))
    for name in matching:
        path = os.path.join(dirname, name)
        wc_output = subprocess.getoutput(f'wc -l {path}')
        line_counts[path] = int(wc_output.split(' ')[0])
    return line_counts
def get_files(dirname, filename_pat="*", recursive=False):
    """List files under *dirname* whose name matches *filename_pat*.

    Descends into subdirectories only when *recursive* is true. Returns
    ``None`` (with a warning) when the directory does not exist.
    """
    if not tf.io.gfile.exists(dirname):
        logging.warning(f"{dirname} does not exist!")
        return None
    collected = []
    for entry in tf.io.gfile.listdir(dirname):
        full_path = os.path.join(dirname, entry)
        if tf.io.gfile.isdir(full_path):
            if recursive:
                collected.extend(get_files(full_path, filename_pat))
        elif fnmatch.fnmatch(entry, filename_pat):
            collected.append(full_path)
    return collected
def get_worker_files(dirname,
                     worker_rank,
                     world_size,
                     filename_pat="*",
                     shuffle=False,
                     seed=0):
    """Get file paths belong to one worker.

    Files are sorted (then optionally shuffled with a fixed *seed* so every
    worker sees the same order) and dealt round-robin: worker ``r`` of
    ``world_size`` takes indices r, r+world_size, r+2*world_size, ...
    """
    all_files = get_files(dirname, filename_pat)
    all_files.sort()
    if shuffle:
        random.seed(seed)
        random.shuffle(all_files)
    files = all_files[worker_rank::world_size]
    logging.info(
        f"worker_rank:{worker_rank}, world_size:{world_size}, shuffle:{shuffle}, seed:{seed}, directory:{dirname}, files:{files}"
    )
    return files
class StreamReader:
    """Streams batches of raw text lines from a set of files via tf.data.

    NOTE(review): ``make_one_shot_iterator`` and ``tf.Session`` are TF1-style
    APIs, so this appears to require TF 1.x (or v1-compat) behavior — confirm.
    """
    def __init__(self, data_paths, batch_size, shuffle=False, shuffle_buffer_size=1000):
        # Keep TensorFlow off the GPU: it is used here purely as a data pipeline.
        tf.config.experimental.set_visible_devices([], device_type="GPU")
        path_len = len(data_paths)
        # Interleave lines from all files for better mixing across shards.
        dataset = tf.data.Dataset.list_files(data_paths).interleave(
            lambda x: tf.data.TextLineDataset(x),
            cycle_length=path_len,
            block_length=128,
            num_parallel_calls=min(path_len, 64),
        )
        if shuffle:
            dataset = dataset.shuffle(
                shuffle_buffer_size, reshuffle_each_iteration=True)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(1)
        self.next_batch = dataset.make_one_shot_iterator().get_next()
        self.session = None
    def reset(self):
        # Open a fresh session for a new pass over the data; closing the old
        # one releases its resources.
        if self.session:
            self.session.close()
        self.session = tf.Session()
        self.endofstream = False
    def get_next(self):
        # Returns the next batch, or None once the dataset is exhausted
        # (in which case reach_end() starts returning True).
        try:
            ret = self.session.run(self.next_batch)
        except tf.errors.OutOfRangeError:
            self.endofstream = True
            return None
        return ret
    def reach_end(self):
        return self.endofstream
class StreamSampler:
    """Iterable over one worker's shard of the behavior files.

    Combines :func:`get_worker_files` (round-robin file assignment) with a
    :class:`StreamReader`; iteration restarts the underlying stream.
    """
    def __init__(
        self,
        data_dir,
        filename_pat,
        batch_size,
        worker_rank,
        world_size,
        enable_shuffle=False,
        shuffle_buffer_size=1000,
        shuffle_seed=0,
    ):
        data_paths = get_worker_files(
            data_dir,
            worker_rank,
            world_size,
            filename_pat,
            shuffle=enable_shuffle,
            seed=shuffle_seed,
        )
        self.stream_reader = StreamReader(
            data_paths,
            batch_size,
            enable_shuffle,
            shuffle_buffer_size
        )
    def __iter__(self):
        # Each new iteration resets the reader (fresh TF session / epoch).
        self.stream_reader.reset()
        return self
    def __next__(self):
        """Implement iterator interface."""
        next_batch = self.stream_reader.get_next()
        # get_next() yields None at end-of-stream; anything that is neither an
        # ndarray nor a tuple terminates iteration.
        if not isinstance(next_batch, np.ndarray) and not isinstance(
                next_batch, tuple):
            raise StopIteration
        return next_batch
    def reach_end(self):
        return self.stream_reader.reach_end()
| 3,990 | 27.507143 | 129 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/parameters.py | import argparse
import utils
import logging
def parse_args():
    """Define and parse all command-line options for training/evaluation.

    Returns the populated ``argparse.Namespace`` (also logged at INFO level).
    Option groups: data locations and streaming, model-training
    hyper-parameters, and pretrained-LM (BERT/TNLRv3) settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode",
                        type=str,
                        default="train",
                        choices=['train', 'test'])
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default="../MIND/MINDlarge_train",
    )
    parser.add_argument(
        "--test_data_dir",
        type=str,
        default="../MIND/MINDlarge_test",
    )
    parser.add_argument("--filename_pat", type=str,
                        default="behaviors_np4_*.tsv")
    parser.add_argument("--model_dir", type=str, default='./model')
    parser.add_argument("--batch_size", type=int, default=32)
    # npratio: number of sampled negatives per positive impression.
    parser.add_argument("--npratio", type=int, default=4)
    parser.add_argument("--enable_gpu", type=utils.str2bool, default=True)
    parser.add_argument("--enable_hvd", type=utils.str2bool, default=True)
    parser.add_argument("--enable_shuffle", type=utils.str2bool, default=True)
    parser.add_argument("--shuffle_buffer_size", type=int, default=10000)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--filter_num", type=int, default=3)
    parser.add_argument("--log_steps", type=int, default=100)
    # model training
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--lr", type=float, default=0.0001)
    parser.add_argument("--num_words_title", type=int, default=20)
    parser.add_argument("--num_words_abstract", type=int, default=50)
    parser.add_argument("--num_words_body", type=int, default=100)
    parser.add_argument(
        "--user_log_length",
        type=int,
        default=50,
    )
    parser.add_argument(
        "--word_embedding_dim",
        type=int,
        default=300,
    )
    parser.add_argument(
        "--glove_embedding_path",
        type=str,
        default='./glove.840B.300d.txt',
    )
    parser.add_argument("--freeze_embedding",
                        type=utils.str2bool,
                        default=False)
    parser.add_argument(
        "--news_dim",
        type=int,
        default=64,
    )
    parser.add_argument(
        "--news_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--user_query_vector_dim",
        type=int,
        default=200,
    )
    parser.add_argument(
        "--num_attention_heads",
        type=int,
        default=20,
    )
    parser.add_argument("--user_log_mask", type=utils.str2bool, default=True)
    parser.add_argument("--drop_rate", type=float, default=0.2)
    parser.add_argument("--save_steps", type=int, default=1000)
    parser.add_argument("--max_steps_per_epoch", type=int, default=1000000)
    parser.add_argument(
        "--load_ckpt_name",
        type=str,
        default=None,
        help="choose which ckpt to load and test"
    )
    # bert
    parser.add_argument("--apply_bert", type=utils.str2bool, default=False)
    parser.add_argument("--model_type", default="bert", type=str)
    parser.add_argument("--do_lower_case", type=utils.str2bool, default=True)
    parser.add_argument(
        "--model_name", default="../bert-base-uncased/pytorch_model.bin", type=str)
    parser.add_argument(
        "--config_name", default="../bert-base-uncased/config.json", type=str)
    parser.add_argument("--tokenizer_name",
                        default="../bert-base-uncased/vocab.txt", type=str)
    parser.add_argument("--num_hidden_layers", type=int, default=8)
    # Indices of transformer blocks left trainable; everything else is frozen.
    parser.add_argument(
        "--bert_trainable_layer",
        type=int, nargs='+',
        default=[],
        choices=list(range(12)))
    parser.add_argument("--model", type=str, default=None)
    parser.add_argument("--pooling", type=str, default='att')
    parser.add_argument("--start_epoch", type=int, default=0)
    parser.add_argument("--use_pretrain_model",
                        type=utils.str2bool, default=False)
    parser.add_argument("--pretrain_model_path", type=str, default=None)
    parser.add_argument("--pretrain_lr", type=float, default=0.00001)
    args = parser.parse_args()
    logging.info(args)
    return args
if __name__ == "__main__":
    # Smoke-test the parser when this module is run directly.
    args = parse_args()
| 4,252 | 32.753968 | 83 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/metrics.py | from sklearn.metrics import roc_auc_score
import numpy as np
def dcg_score(y_true, y_score, k=10):
    """DCG of *y_true* under the ranking induced by descending *y_score* (top *k*)."""
    top_k = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, top_k)
    gains = 2 ** relevance - 1
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """DCG of the predicted ranking normalized by the ideal (label-sorted) DCG."""
    ideal = dcg_score(y_true, y_true, k)
    return dcg_score(y_true, y_score, k) / ideal
def mrr_score(y_true, y_score):
    """Mean reciprocal rank of positive labels under descending *y_score*."""
    ordered = np.take(y_true, np.argsort(y_score)[::-1])
    reciprocal_ranks = ordered / (np.arange(len(ordered)) + 1)
    return np.sum(reciprocal_ranks) / np.sum(y_true)
def ctr_score(y_true, y_score, k=1):
    """Mean label value among the top-*k* items ranked by descending *y_score*."""
    top_k = np.argsort(y_score)[::-1][:k]
    return np.mean(np.take(y_true, top_k))
| 793 | 25.466667 | 52 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/model_bert.py | import numpy as np
import torch
from torch import nn
from utils import MODEL_CLASSES
class AttentionPooling(nn.Module):
def __init__(self, emb_size, hidden_size):
super(AttentionPooling, self).__init__()
self.att_fc1 = nn.Linear(emb_size, hidden_size)
self.att_fc2 = nn.Linear(hidden_size, 1)
def forward(self, x, attn_mask=None):
"""
Args:
x: batch_size, candidate_size, emb_dim
attn_mask: batch_size, candidate_size
Returns:
(shape) batch_size, emb_dim
"""
bz = x.shape[0]
e = self.att_fc1(x)
e = nn.Tanh()(e)
alpha = self.att_fc2(e)
alpha = torch.exp(alpha)
if attn_mask is not None:
alpha = alpha * attn_mask.unsqueeze(2)
alpha = alpha / (torch.sum(alpha, dim=1, keepdim=True) + 1e-8)
x = torch.bmm(x.permute(0, 2, 1), alpha).squeeze(dim=-1)
return x
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with optional key masking."""
    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k
    def forward(self, Q, K, V, attn_mask=None):
        '''
        Q: batch_size, n_head, candidate_num, d_k
        K: batch_size, n_head, candidate_num, d_k
        V: batch_size, n_head, candidate_num, d_v
        attn_mask: batch_size, n_head, candidate_num
        Return: batch_size, n_head, candidate_num, d_v
        '''
        # exp of scaled scores, masked keys zeroed, then row-normalized.
        raw = torch.exp(torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k))
        if attn_mask is not None:
            raw = raw * attn_mask.unsqueeze(dim=-2)
        weights = raw / (torch.sum(raw, dim=-1, keepdim=True) + 1e-8)
        return torch.matmul(weights, V)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention built on ScaledDotProductAttention."""
    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
        self._initialize_weights()
    def _initialize_weights(self):
        # Xavier-initialize every linear projection in this module.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight, gain=1)
    def forward(self, Q, K, V, mask=None):
        '''
        Q: batch_size, candidate_num, d_model
        K: batch_size, candidate_num, d_model
        V: batch_size, candidate_num, d_model
        mask: batch_size, candidate_num
        '''
        batch_size = Q.shape[0]
        if mask is not None:
            # Broadcast the per-position mask over all heads.
            mask = mask.unsqueeze(dim=1).expand(-1, self.n_heads, -1)
        # Project, then split into heads: (bz, n_heads, seq_len, d_k/d_v).
        queries = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        keys = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        values = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
        attended = self.scaled_dot_product_attn(queries, keys, values, mask)
        # Merge heads back into the feature dimension.
        merged = attended.transpose(1, 2).contiguous().view(
            batch_size, -1, self.n_heads * self.d_v)
        return merged
class NewsEncoder(nn.Module):
    """Encode a tokenized news title into a dense vector: pretrained LM ->
    pooling ('cls' | 'att' | mean) -> linear projection to args.news_dim."""
    def __init__(self, args):
        super(NewsEncoder, self).__init__()
        self.pooling = args.pooling
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        # Position of the hidden-states tuple in the model's outputs
        # (3 for 'tnlrv3', 2 otherwise); enabled via output_hidden_states.
        self.output_index = 3 if args.model_type == 'tnlrv3' else 2
        self.bert_config = config_class.from_pretrained(args.config_name,
                                                        output_hidden_states=True,
                                                        num_hidden_layers=args.num_hidden_layers)
        self.bert_model = model_class.from_pretrained(args.model_name, config=self.bert_config)
        if args.pooling == 'att':
            self.attn = AttentionPooling(self.bert_config.hidden_size, args.news_query_vector_dim)
        self.dense = nn.Linear(self.bert_config.hidden_size, args.news_dim)
    def forward(self, x):
        '''
        x: batch_size, word_num * 2
        mask: batch_size, word_num
        '''
        # The first half of x holds token ids, the second half the attention mask.
        batch_size, num_words = x.shape
        num_words = num_words // 2
        text_ids = torch.narrow(x, 1, 0, num_words)
        text_attmask = torch.narrow(x, 1, num_words, num_words)
        # Index num_hidden_layers into the hidden-states tuple = last layer.
        word_vecs = self.bert_model(
            text_ids, text_attmask)[self.output_index][self.bert_config.num_hidden_layers]
        if self.pooling == 'cls':
            # First token's vector ([CLS] position).
            news_vec = torch.narrow(word_vecs, 1, 0, 1).squeeze(dim=1)
        elif self.pooling == 'att':
            news_vec = self.attn(word_vecs)
        else:
            news_vec = torch.mean(word_vecs, dim=1)
        news_vec = self.dense(news_vec)
        return news_vec
class UserEncoder(nn.Module):
    """Aggregate a user's clicked-news vectors into one user vector.

    With args.model == 'NRMS' a multi-head self-attention layer runs over the
    history first; either way an additive attention pooling produces the
    final vector. When user_log_mask is off, padded history slots are
    replaced by a learnable padding vector instead of being masked.
    """
    def __init__(self, args):
        super(UserEncoder, self).__init__()
        self.args = args
        if args.model == 'NRMS':
            self.multi_head_self_attn = MultiHeadSelfAttention(args.news_dim,
                                                               args.num_attention_heads, 16, 16)
            self.attn = AttentionPooling(args.num_attention_heads * 16, args.user_query_vector_dim)
        else:
            self.attn = AttentionPooling(args.news_dim, args.user_query_vector_dim)
        # NOTE(review): .type(torch.FloatTensor) on an already-float32 CPU
        # Parameter should return the same object, keeping pad_doc registered
        # as a parameter — confirm this holds for the torch version in use.
        self.pad_doc = nn.Parameter(torch.empty(1,
                                                args.news_dim).uniform_(-1,
                                                                        1)).type(torch.FloatTensor)
    def forward(self, news_vecs, log_mask=None):
        '''
        news_vecs: batch_size, history_num, news_dim
        log_mask: batch_size, history_num
        '''
        bz = news_vecs.shape[0]
        if self.args.user_log_mask:
            # Masked variant: padded positions are excluded via log_mask.
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs, log_mask)
                user_vec = self.attn(news_vecs, log_mask)
            else:
                user_vec = self.attn(news_vecs, log_mask)
        else:
            # Unmasked variant: swap padded slots for the learnable pad vector.
            padding_doc = self.pad_doc.unsqueeze(dim=0).expand(bz, self.args.user_log_length, -1)
            news_vecs = news_vecs * \
                log_mask.unsqueeze(dim=-1) + padding_doc * \
                (1 - log_mask.unsqueeze(dim=-1))
            if self.args.model == 'NRMS':
                news_vecs = self.multi_head_self_attn(news_vecs, news_vecs, news_vecs)
                user_vec = self.attn(news_vecs)
            else:
                user_vec = self.attn(news_vecs)
        return user_vec
class ModelBert(torch.nn.Module):
    """Two-tower news recommender: NewsEncoder for articles, UserEncoder for
    click histories; scores are dot products, trained with cross-entropy over
    1 positive + K sampled negatives."""
    def __init__(self, args):
        super(ModelBert, self).__init__()
        self.args = args
        self.news_encoder = NewsEncoder(args)
        self.user_encoder = UserEncoder(args)
        self.loss_fn = nn.CrossEntropyLoss()
    def forward(self, history, history_mask, candidate, label):
        '''
        history: batch_size, history_length, num_word_title * 2
        history_mask: batch_size, history_length
        candidate: batch_size, 1+K, num_word_title * 2
        label: batch_size, 1+K
        '''
        batch_size = history.shape[0]
        input_id_num = history.shape[-1]
        # Flatten (batch, slots) so all texts go through the encoder at once,
        # then restore the per-user layout.
        candidate_news = candidate.reshape(-1, input_id_num)
        candidate_news_vecs = self.news_encoder(candidate_news).reshape(
            batch_size, -1, self.args.news_dim)
        history_news = history.reshape(-1, input_id_num)
        history_news_vecs = self.news_encoder(history_news).reshape(-1, self.args.user_log_length,
                                                                    self.args.news_dim)
        user_vec = self.user_encoder(history_news_vecs, history_mask)
        # Dot-product score of every candidate against the user vector.
        score = torch.bmm(candidate_news_vecs, user_vec.unsqueeze(dim=-1)).squeeze(dim=-1)
        loss = self.loss_fn(score, label)
        return loss, score
| 8,077 | 37.836538 | 99 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/preprocess.py | import tensorflow as tf
from tqdm import tqdm
import numpy as np
from utils import MODEL_CLASSES
def update_dict(dict, key, value=None):
    """Insert *key* into *dict* only if absent.

    Without an explicit *value*, keys are auto-numbered 1, 2, 3, ...
    (len(dict)+1), which builds id lookup tables. Existing keys are never
    overwritten. (The parameter name shadows the builtin ``dict``; kept
    for call-site compatibility.)
    """
    if key in dict:
        return
    dict[key] = len(dict) + 1 if value is None else value
def read_news_bert(news_path, args, mode='train'):
    """Parse a MIND-style news.tsv and tokenize titles with the LM tokenizer.

    Returns (news, news_index, category_dict, subcategory_dict) in 'train'
    mode, or (news, news_index) in 'test' mode. news maps doc id ->
    [tokenized title, category, subcategory]; the *_dict tables assign
    1-based ids and are only grown during training.
    """
    news = {}
    category_dict = {}
    subcategory_dict = {}
    news_index = {}
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name, do_lower_case=True)
    with tf.io.gfile.GFile(news_path, "r") as f:
        for line in tqdm(f):
            splited = line.strip('\n').split('\t')
            # Only the first four columns are used; abstract/url/entities ignored.
            doc_id, category, subcategory, title, _, _, _, _ = splited
            update_dict(news_index, doc_id)
            title = title.lower()
            # Pad/truncate every title to exactly num_words_title tokens.
            title = tokenizer(title, max_length=args.num_words_title,
                              pad_to_max_length=True, truncation=True)
            update_dict(news, doc_id, [title, category, subcategory])
            if mode == 'train':
                update_dict(category_dict, category)
                update_dict(subcategory_dict, subcategory)
    if mode == 'train':
        return news, news_index, category_dict, subcategory_dict
    elif mode == 'test':
        return news, news_index
    else:
        assert False, 'Wrong mode!'
def get_doc_input_bert(news, news_index, category_dict, subcategory_dict, args):
    """Pack tokenized news into fixed-size int32 numpy arrays.

    Row 0 of every array is reserved as an all-zero padding entry; article
    rows are placed at their news_index id. Categories absent from the
    lookup tables map to 0. Returns (title ids, title attention mask,
    category ids, subcategory ids).
    """
    num_rows = len(news) + 1
    news_title = np.zeros((num_rows, args.num_words_title), dtype='int32')
    news_title_attmask = np.zeros((num_rows, args.num_words_title), dtype='int32')
    news_category = np.zeros((num_rows, 1), dtype='int32')
    news_subcategory = np.zeros((num_rows, 1), dtype='int32')
    for doc_id in tqdm(news):
        title, category, subcategory = news[doc_id]
        row = news_index[doc_id]
        news_title[row] = title['input_ids']
        news_title_attmask[row] = title['attention_mask']
        news_category[row, 0] = category_dict.get(category, 0)
        news_subcategory[row, 0] = subcategory_dict.get(subcategory, 0)
    return news_title, news_title_attmask, news_category, news_subcategory
| 2,379 | 33.492754 | 101 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/convert_state_dict.py | import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
        archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
        cache_dir, force_download, proxies, resume_download,
):
    """Download (or reuse a cached copy of) a checkpoint via transformers'
    cached_path and load it onto CPU with torch.load.

    Raises EnvironmentError with a context-specific message when the archive
    cannot be resolved (server unreachable vs. unknown model name/path).
    """
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
                                            proxies=proxies, resume_download=resume_download)
    except EnvironmentError:
        # Distinguish "known model, network failed" from "unknown name/path".
        if pretrained_model_name_or_path in pretrained_model_archive_map:
            msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
                archive_file)
        else:
            msg = "Model name '{}' was not found in model name list ({}). " \
                  "We assumed '{}' was a path or url to model weight files named one of {} but " \
                  "couldn't find any such file at this path or url.".format(
                      pretrained_model_name_or_path,
                      ', '.join(pretrained_model_archive_map.keys()),
                      archive_file,
                      [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
        raise EnvironmentError(msg)
    if resolved_archive_file == archive_file:
        logger.info("loading weights file {}".format(archive_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))
    return torch.load(resolved_archive_file, map_location='cpu')
def load_model(state_dict):
    """Convert a TuringNLRv3 state dict to standard BERT-style naming.

    Rewrites fused attention tensors into separate query/key/value entries:
    - ``*.attention.self.q_bias``           -> ``*.attention.self.query.bias`` (flattened)
    - ``*.attention.self.v_bias``           -> ``*.attention.self.value.bias`` plus an
      all-zero ``*.attention.self.key.bias`` of the same shape
    - ``*.attention.self.qkv_linear.weight``-> equal thirds as query/key/value weights
    - ``bert.encoder.rel_pos_bias.weight``  -> ``bert.rel_pos_bias.weight``
    All other entries pass through unchanged.
    """
    converted = {}
    for key, value in state_dict.items():
        if key.endswith("attention.self.q_bias"):
            converted[key.replace(
                "attention.self.q_bias", "attention.self.query.bias")] = value.view(-1)
        elif key.endswith("attention.self.v_bias"):
            flat = value.view(-1)
            converted[key.replace(
                "attention.self.v_bias", "attention.self.value.bias")] = flat
            # TNLRv3 has no key bias; standard BERT expects one, so add zeros.
            converted[key.replace(
                "attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(flat)
        elif key.endswith("attention.self.qkv_linear.weight"):
            rows, _ = value.size()
            assert rows % 3 == 0
            q, k, v = torch.split(value, split_size_or_sections=rows // 3, dim=0)
            converted[key.replace(
                "attention.self.qkv_linear.weight", "attention.self.query.weight")] = q
            converted[key.replace(
                "attention.self.qkv_linear.weight", "attention.self.key.weight")] = k
            converted[key.replace(
                "attention.self.qkv_linear.weight", "attention.self.value.weight")] = v
        elif key == "bert.encoder.rel_pos_bias.weight":
            converted["bert.rel_pos_bias.weight"] = value
        else:
            converted[key] = value
    del state_dict
    return converted
# Dispatch table: maps a model-type string to its checkpoint converter.
state_dict_convert = {
    'tnlrv3': load_model,
}
| 3,162 | 40.077922 | 109 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/tokenization_tnlrv3.py | # coding=utf-8
"""Tokenization classes for TuringNLRv3."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
}
class TuringNLRv3Tokenizer(BertTokenizer):
    r"""
    Constructs a TuringNLRv3Tokenizer.
    :class:`~transformers.TuringNLRv3Tokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
    Args:
        vocab_file: Path to a one-wordpiece-per-line vocabulary file
        do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
            minimum of this value (if specified) and the underlying model's sequence length.
        never_split: List of tokens which will never be split during tokenization. Only has an effect when
            do_wordpiece_only=False
    """
    # Class-level registry hooks consumed by transformers' from_pretrained();
    # the pretrained maps are intentionally empty (local files only).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
    """Minimal tokenizer that splits purely on whitespace (no wordpiece)."""
    def tokenize(self, text):
        return whitespace_tokenize(text)
| 1,646 | 31.94 | 145 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/s2s_loader.py | import numpy as np
from random import randint
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
    """Return a uniformly random token from *vocab_words*."""
    index = randint(0, len(vocab_words) - 1)
    return vocab_words[index]
def batch_list_to_batch_tensors(batch):
    """Transpose a list of per-example tuples into per-field batch tensors.

    Tensor fields are stacked, ``None`` fields stay ``None``, and anything
    else (numbers, lists) becomes a new long tensor.
    """
    tensors = []
    for field in zip(*batch):
        first = field[0]
        if first is None:
            tensors.append(None)
        elif isinstance(first, torch.Tensor):
            tensors.append(torch.stack(field))
        else:
            tensors.append(torch.tensor(field, dtype=torch.long))
    return tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """ Pre-process Pipeline Class : callable """

    def __init__(self):
        super().__init__()
        # Skip-gram / masking configuration (populated by subclasses).
        self.skipgram_prb = None
        self.skipgram_size = None
        self.skipgram_size_geo_list = None
        # Whole-word handling options.
        self.pre_whole_word = None
        self.mask_whole_word = None
        self.span_same_mask = False
        # Sampling / sentence-piece options.
        self.word_subsample_prb = None
        self.sp_prob = None
        self.pieces_dir = None
        self.pieces_threshold = 10
        # Vocabulary and bookkeeping.
        self.vocab_words = None
        self.call_count = 0
        self.offline_mode = False

    def __call__(self, instance):
        # Subclasses implement the actual per-instance preprocessing.
        raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
    """ Pre-processing steps for pretraining transformer

    Builds decoder-ready inputs for seq2seq generation: padded token ids,
    segment ids (source vs. target type), position ids, and an attention
    mask that lets target positions attend to the full source but only to
    earlier target positions (triangular part).
    """
    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
                 mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
                 cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
        super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self.max_len = max_len
        # Precomputed lower-triangular matrix reused for causal masking.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.task_idx = 3   # relax projection layer for different tasks
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.max_tgt_length = max_tgt_length
        self.pos_shift = pos_shift
        # Number of special tokens around the source: [CLS] only when
        # pos_shift, otherwise [CLS] ... [SEP].
        self.delta = 1 if pos_shift else 2
        self.cls_token = cls_token
        self.sep_token = sep_token
        self.pad_token = pad_token
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        # Debug counter: the first few calls log their input tokens.
        self.cc = 0

    def __call__(self, instance):
        tokens_a, max_a_len = instance

        # Wrap the source with special tokens and pad it to max_a_len + delta.
        padded_tokens_a = [self.cls_token] + tokens_a
        if not self.pos_shift:
            padded_tokens_a = padded_tokens_a + [self.sep_token]
        assert len(padded_tokens_a) <= max_a_len + self.delta
        if max_a_len + self.delta > len(padded_tokens_a):
            padded_tokens_a += [self.pad_token] * \
                (max_a_len + self.delta - len(padded_tokens_a))
        assert len(padded_tokens_a) == max_a_len + self.delta

        max_len_in_batch = min(self.max_tgt_length +
                               max_a_len + self.delta, self.max_len)
        tokens = padded_tokens_a
        segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
            + [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))

        mask_qkv = None

        # Position ids: real source positions, zeros over source padding,
        # then target positions continuing right after the real source.
        position_ids = []
        for i in range(len(tokens_a) + self.delta):
            position_ids.append(i)
        for i in range(len(tokens_a) + self.delta, max_a_len + self.delta):
            position_ids.append(0)
        for i in range(max_a_len + self.delta, max_len_in_batch):
            position_ids.append(
                i - (max_a_len + self.delta) + len(tokens_a) + self.delta)

        # Token Indexing
        input_ids = self.indexer(tokens)

        self.cc += 1
        if self.cc < 20:
            # print("Vocab size = %d" % len(self.vocab_words))
            # for tk_id in input_ids:
            #     print(u"trans %d -> %s" % (tk_id, self.vocab_words[tk_id]))
            logger.info(u"Input src = %s" % " ".join(
                (self.vocab_words[tk_id]) for tk_id in input_ids))

        # Zero Padding
        input_mask = torch.zeros(
            max_len_in_batch, max_len_in_batch, dtype=torch.long)
        if self.mode == "s2s":
            # Everyone may attend to the (padded) source span...
            input_mask[:, :len(tokens_a) + self.delta].fill_(1)
        else:
            st, end = 0, len(tokens_a) + self.delta
            input_mask[st:end, st:end].copy_(
                self._tril_matrix[:end, :end])
            input_mask[end:, :len(tokens_a) + self.delta].fill_(1)
        # ...while target positions attend causally among themselves.
        second_st, second_end = len(padded_tokens_a), max_len_in_batch
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])

        return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
| 5,318 | 32.878981 | 90 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/modeling.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, \
BertOutput, BertPredictionHeadTransform, BertPooler
from transformers.file_utils import WEIGHTS_NAME
from tnlrv3.config import TuringNLRv3ForSeq2SeqConfig
from tnlrv3.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
# Module-level logger used for checkpoint loading / resizing messages.
logger = logging.getLogger(__name__)
# TNLRv3 uses standard LayerNorm; keep the Bert-style alias so the rest of
# this file (and state-dict code) can refer to it by the conventional name.
BertLayerNorm = torch.nn.LayerNorm
# name -> URL map of downloadable pretrained checkpoints (empty here).
TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP = {
}
class TuringNLRv3PreTrainedModel(BertPreTrainedModel):
    """An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    config_class = TuringNLRv3ForSeq2SeqConfig
    # model_type key -> archive map used by from_pretrained to locate and
    # convert external checkpoints into this package's key naming.
    supported_convert_pretrained_model_archive_map = {
        "tnlrv3": TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }
    base_model_prefix = "TuringNLRv3_for_seq2seq"
    pretrained_model_archive_map = {
        **TuringNLRv3_PRETRAINED_MODEL_ARCHIVE_MAP,
    }

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(
                mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(
            cls, pretrained_model_name_or_path, reuse_position_embedding=None,
            replace_prefix=None, *model_args, **kwargs,
    ):
        """Load a pretrained checkpoint, converting and resizing as needed.

        On top of the base-class behavior this (1) converts external
        (remote or local) state dicts into this package's key naming via
        ``state_dict_convert``, (2) grows or shrinks the position-embedding
        table to match ``config.max_position_embeddings``, and (3) optionally
        strips a key prefix via ``replace_prefix``.

        NOTE(review): the code below assumes callers always pass ``config=``
        in kwargs, and that ``kwargs["state_dict"]`` exists by the time it is
        checked (it is only set when ``model_type`` is recognized) — confirm
        against call sites.
        """
        model_type = kwargs.pop('model_type', 'tnlrv3')
        if model_type is not None and "state_dict" not in kwargs:
            if model_type in cls.supported_convert_pretrained_model_archive_map:
                pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[
                    model_type]
                if pretrained_model_name_or_path in pretrained_model_archive_map:
                    # Named checkpoint: download (or read from cache), then
                    # convert the external state dict to our key names.
                    state_dict = get_checkpoint_from_transformer_cache(
                        archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
                        pretrained_model_name_or_path=pretrained_model_name_or_path,
                        pretrained_model_archive_map=pretrained_model_archive_map,
                        cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
                        proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
                    )
                    state_dict = state_dict_convert[model_type](state_dict)
                    kwargs["state_dict"] = state_dict
                    logger.info("Load HF ckpts")
                elif os.path.isfile(pretrained_model_name_or_path):
                    # Direct path to a checkpoint file.
                    state_dict = torch.load(
                        pretrained_model_name_or_path, map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](
                        state_dict)
                    logger.info("Load local ckpts")
                elif os.path.isdir(pretrained_model_name_or_path):
                    # Directory containing a standard weights file.
                    state_dict = torch.load(os.path.join(
                        pretrained_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
                    kwargs["state_dict"] = state_dict_convert[model_type](
                        state_dict)
                    logger.info("Load local ckpts")
                else:
                    raise RuntimeError(
                        "Not fined the pre-trained checkpoint !")
        if kwargs["state_dict"] is None:
            logger.info("TNLRv3 does't support the model !")
            raise NotImplementedError()

        config = kwargs["config"]
        state_dict = kwargs["state_dict"]
        # initialize new position embeddings (From Microsoft/UniLM)
        _k = 'bert.embeddings.position_embeddings.weight'
        if _k in state_dict:
            if config.max_position_embeddings > state_dict[_k].shape[0]:
                # Grow the table: re-init, then tile the old rows into it.
                logger.info("Resize > position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(
                    data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(
                    mean=0.0, std=config.initializer_range)
                # With reuse_position_embedding the old table is copied
                # repeatedly across the whole new range; otherwise only
                # the first old_vocab_size rows are copied.
                max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
                shift = 0
                while shift < max_range:
                    delta = min(old_vocab_size, max_range - shift)
                    new_postion_embedding.data[shift: shift +
                                               delta, :] = state_dict[_k][:delta, :]
                    logger.info(" CP [%d ~ %d] into [%d ~ %d] " %
                                (0, delta, shift, shift + delta))
                    shift += delta
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding
            elif config.max_position_embeddings < state_dict[_k].shape[0]:
                # Shrink the table: keep only the first rows.
                logger.info("Resize < position embeddings !")
                old_vocab_size = state_dict[_k].shape[0]
                new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
                    size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
                new_postion_embedding = nn.Parameter(
                    data=new_postion_embedding, requires_grad=True)
                new_postion_embedding.data.normal_(
                    mean=0.0, std=config.initializer_range)
                new_postion_embedding.data.copy_(
                    state_dict[_k][:config.max_position_embeddings, :])
                state_dict[_k] = new_postion_embedding.data
                del new_postion_embedding

        if replace_prefix is not None:
            # Strip a leading key prefix before loading.
            new_state_dict = {}
            for key in state_dict:
                if key.startswith(replace_prefix):
                    new_state_dict[key[len(replace_prefix):]] = state_dict[key]
                else:
                    new_state_dict[key] = state_dict[key]
            kwargs["state_dict"] = new_state_dict
            del state_dict

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
    """Sum word, position and (optional) token-type embeddings.

    Returns both the embedded sequence and the position ids that were used,
    since callers need the latter for relative-position buckets.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=0)
        # Optionally freeze the word-embedding table.
        if getattr(config, "fix_word_embedding", None):
            self.word_embeddings.weight.requires_grad = False
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        # Token-type embeddings are omitted entirely when unused.
        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(
                config.type_vocab_size, config.hidden_size)
        else:
            self.token_type_embeddings = None
        # self.LayerNorm is not snake-cased so TensorFlow checkpoint
        # variable names still map onto it.
        self.LayerNorm = BertLayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if input_ids is None:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            input_shape = input_ids.size()
            device = input_ids.device
        seq_length = input_shape[1]

        # Default position ids: 0..seq_length-1 broadcast over the batch.
        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        # Default token types: all zeros.
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                input_shape, dtype=torch.long, device=device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.position_embeddings(position_ids)
        if self.token_type_embeddings:
            embeddings = embeddings + \
                self.token_type_embeddings(token_type_ids)

        embeddings = self.dropout(self.LayerNorm(embeddings))
        return embeddings, position_ids
class BertSelfAttention(nn.Module):
    """Multi-head self-attention with optional additive relative-position
    bias and an incremental (``split_lengths``) evaluation mode in which
    each segment's queries only attend to keys/values of itself and of
    earlier segments.
    """

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[
            :-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def multi_head_attention(self, query, key, value, attention_mask, rel_pos):
        """Scaled dot-product attention over pre-projected q/k/v tensors."""
        query_layer = self.transpose_for_scores(query)
        key_layer = self.transpose_for_scores(key)
        value_layer = self.transpose_for_scores(value)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(
            query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / \
            math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        if rel_pos is not None:
            # Additive relative-position bias, shaped like the scores.
            attention_scores = attention_scores + rel_pos
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, hidden)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return (context_layer, attention_probs) if self.output_attentions else (context_layer,)

    def forward(self, hidden_states, attention_mask=None,
                encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        mixed_query_layer = self.query(hidden_states)
        if split_lengths:
            # Split mode returns only context layers, never probabilities.
            assert not self.output_attentions
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        if split_lengths:
            # Evaluate segment by segment, accumulating the key/value
            # prefix so each segment attends only to itself and earlier
            # segments; the mask/bias are sliced to the matching window.
            query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
            key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
            value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)
            key = None
            value = None
            outputs = []
            sum_length = 0
            for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
                key = _key if key is None else torch.cat((key, _key), dim=1)
                value = _value if value is None else torch.cat(
                    (value, _value), dim=1)
                sum_length += part_length
                outputs.append(self.multi_head_attention(
                    query, key, value, attention_mask[:, :,
                                                      sum_length - part_length: sum_length, :sum_length],
                    rel_pos=None if rel_pos is None else rel_pos[:, :,
                                                                 sum_length - part_length: sum_length, :sum_length],
                )[0])
            outputs = (torch.cat(outputs, dim=1), )
        else:
            outputs = self.multi_head_attention(
                mixed_query_layer, mixed_key_layer, mixed_value_layer,
                attention_mask, rel_pos=rel_pos)
        return outputs
class BertAttention(nn.Module):
    """Self-attention followed by its residual output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None,
                split_lengths=None, rel_pos=None):
        attn_outputs = self.self(
            hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            split_lengths=split_lengths,
            rel_pos=rel_pos,
        )
        projected = self.output(attn_outputs[0], hidden_states)
        # Pass through any attention probabilities self-attention returned.
        return (projected,) + attn_outputs[1:]
class BertLayer(nn.Module):
    """One transformer block: attention plus feed-forward, each residual."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        attn_outputs = self.attention(
            hidden_states, attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        attn_output = attn_outputs[0]
        # Position-wise feed-forward network with residual connection.
        ffn_hidden = self.intermediate(attn_output)
        layer_output = self.output(ffn_hidden, attn_output)
        return (layer_output,) + attn_outputs[1:]
class BertEncoder(nn.Module):
    """Stack of BertLayer blocks, optionally collecting per-layer outputs."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList(
            BertLayer(config) for _ in range(config.num_hidden_layers))

    def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
        all_hidden_states = ()
        all_attentions = ()
        for layer_module in self.layer:
            if self.output_hidden_states:
                # Record the input to this layer (embeddings for layer 0).
                all_hidden_states += (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask,
                split_lengths=split_lengths, rel_pos=rel_pos)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions += (layer_outputs[1],)
        if self.output_hidden_states:
            # Include the final layer's hidden states as well.
            all_hidden_states += (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs += (all_hidden_states,)
        if self.output_attentions:
            outputs += (all_attentions,)
        # last-layer hidden state, (all hidden states), (all attentions)
        return outputs
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map relative positions to bucket indices (T5-style).

    Small offsets get one bucket each; larger offsets share logarithmically
    sized buckets up to ``max_distance``.  With ``bidirectional=True`` the
    bucket space is split between negative and positive offsets.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    if bidirectional:
        # Half the buckets per sign; positive offsets shift to the top half.
        num_buckets //= 2
        bucket = (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        bucket = 0
        distance = torch.max(-relative_position,
                             torch.zeros_like(relative_position))
    # distance is now in [0, inf); the first half maps one-to-one.
    max_exact = num_buckets // 2
    is_small = distance < max_exact
    # Remaining buckets cover logarithmically bigger ranges up to max_distance.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact) / math.log(max_distance /
                                                           max_exact) * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(
        log_bucket, torch.full_like(log_bucket, num_buckets - 1))
    return bucket + torch.where(is_small, distance, log_bucket)
class TuringNLRv3Model(TuringNLRv3PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """

    def __init__(self, config):
        super(TuringNLRv3Model, self).__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Seq2seq configurations do not use the pooler.
        if not isinstance(config, TuringNLRv3ForSeq2SeqConfig):
            self.pooler = BertPooler(config)
        else:
            self.pooler = None
        # Learned per-head bias over relative-position buckets.
        if self.config.rel_pos_bins > 0:
            self.rel_pos_bias = nn.Linear(
                self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, inputs_embeds=None, split_lengths=None):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError(
                "You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        # NOTE(review): extended_attention_mask is unbound for any other
        # mask rank — callers must pass a 2-D or 3-D attention_mask.
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output, position_ids = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        if self.config.rel_pos_bins > 0:
            # Pairwise relative positions -> buckets -> one-hot ->
            # per-head bias of shape (batch, heads, seq, seq).
            rel_pos_mat = position_ids.unsqueeze(-2) - \
                position_ids.unsqueeze(-1)
            rel_pos = relative_position_bucket(
                rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(
                embedding_output)
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        else:
            rel_pos = None
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask,
            split_lengths=split_lengths, rel_pos=rel_pos)
        sequence_output = encoder_outputs[0]
        # add hidden_states and attentions if they are here
        outputs = (sequence_output, ) + encoder_outputs[1:]
        if self.pooler is None:
            # sequence_output, (hidden_states), (attentions)
            return outputs
        else:
            pooled_output = self.pooler(sequence_output)
            return (sequence_output, pooled_output) + encoder_outputs[1:]
class LabelSmoothingLoss(_Loss):
    """KL-divergence loss against a label-smoothed target distribution.

    The smoothed distribution places ``1 - label_smoothing`` mass on the
    gold token and spreads the rest uniformly over the vocabulary
    (excluding the gold token and ``ignore_index``).  Positions whose
    target equals ``ignore_index`` contribute zero loss.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)
        assert label_smoothing > 0
        assert tgt_vocab_size > 0
        # Mass spread over every class except the gold one and ignore_index.
        smoothed = torch.full(
            (tgt_vocab_size,), label_smoothing / (tgt_vocab_size - 2))
        smoothed[self.ignore_index] = 0
        # Buffer so the template follows the module across devices.
        self.register_buffer('one_hot', smoothed.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """Per-position KL divergence.

        output: log-probabilities, batch_size x num_pos x n_classes
        target: gold indices, batch_size x num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_output = output.view(-1, self.tgt_vocab_size)
        flat_target = target.view(-1)
        # Build the smoothed target distribution for every position ...
        model_prob = self.one_hot.float().repeat(flat_target.size(0), 1)
        model_prob.scatter_(1, flat_target.unsqueeze(1), self.confidence)
        # ... and zero it out entirely at ignored positions.
        model_prob.masked_fill_(
            (flat_target == self.ignore_index).unsqueeze(1), 0)
        kl = F.kl_div(flat_output, model_prob, reduction='none')
        return kl.view(batch_size, num_pos, -1).sum(2)
class BertLMPredictionHead(nn.Module):
    """LM head: transform hidden states, then project with tied weights.

    The projection reuses the word-embedding matrix (weight tying) and
    adds a separate output-only bias per vocabulary entry.
    """

    def __init__(self, config, decoder_weight):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder_weight = decoder_weight  # tied to the input embeddings
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        # Linear projection onto the vocabulary with the tied weights.
        return F.linear(transformed, weight=self.decoder_weight, bias=self.bias)
class BertOnlyMLMHead(nn.Module):
    """Wraps the MLM prediction head as the model's only output head."""

    def __init__(self, config, decoder_weight):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_weight)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
    """Build a padding mask and position ids for variable-length rows.

    num_tokens: 1-D tensor of valid-token counts per row.
    max_len: padded sequence length.
    offset: optional per-row position offset (e.g. the source length when
        numbering target positions); padding positions stay zero either way.

    Returns (mask, position_ids), both of shape (batch, max_len).
    """
    positions = torch.arange(
        0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
    # 1 where the position holds a real token, 0 where it is padding.
    mask = (positions < num_tokens.view(-1, 1)).type_as(num_tokens)
    if offset is not None:
        positions = positions + offset.view(-1, 1)
    # Zero out the ids of padding positions.
    return mask, positions * mask
class TuringNLRv3ForSequenceToSequence(TuringNLRv3PreTrainedModel):
    """Base seq2seq model: TNLRv3 encoder plus a weight-tied MLM head and
    the masked-LM criterion shared by the concrete variants below.
    """
    MODEL_NAME = 'basic class'

    def __init__(self, config):
        super(TuringNLRv3ForSequenceToSequence, self).__init__(config)
        self.bert = TuringNLRv3Model(config)
        # LM head tied to the input word-embedding matrix.
        self.cls = BertOnlyMLMHead(
            config, self.bert.embeddings.word_embeddings.weight)
        self.init_weights()
        # NOTE(review): constructed without an explicit dim (deprecated
        # implicit-dim behavior) and not referenced anywhere in this file —
        # confirm before relying on it.
        self.log_softmax = nn.LogSoftmax()
        self.source_type_id = config.source_type_id
        self.target_type_id = config.target_type_id
        # Exactly one of the two criteria is active, the other stays None.
        if config.label_smoothing > 0:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
            self.crit_mask_lm = None
        else:
            self.crit_mask_lm_smoothed = None
            self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
class TuringNLRv3ForSequenceToSequenceWithPseudoMask(TuringNLRv3ForSequenceToSequence):
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceWithPseudoMask"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """Build the (batch, seq, seq) mask over [source | target | pseudo].

        Source positions carry weight 0, target positions their span id,
        and pseudo (masked copy) positions the negated span id; the sign
        comparisons below then encode who may attend to whom.
        """
        weight = torch.cat((torch.zeros_like(source_position_ids),
                            target_span_ids, -target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)
        # Attendable = non-negative weight and not padding (pseudo shares
        # the target's padding mask).
        true_tokens = (0 <= to_weight) & (
            torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
        # Real tokens attend to the source and to earlier-or-equal spans.
        true_tokens_mask = (from_weight >= 0) & true_tokens & (
            to_weight <= from_weight)
        # Pseudo tokens attend to strictly earlier real tokens ...
        pseudo_tokens_mask = (
            from_weight < 0) & true_tokens & (-to_weight > from_weight)
        # ... and to themselves (matching negated span id).
        pseudo_tokens_mask = pseudo_tokens_mask | (
            (from_weight < 0) & (to_weight == from_weight))
        return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)

    def forward(
            self, source_ids, target_ids, label_ids, pseudo_ids,
            num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None):
        """Compute the pseudo-masked LM loss for one batch.

        The input is the concatenation [source | target | pseudo], where
        pseudo is a masked copy of the target; predictions are scored only
        at the pseudo positions against ``label_ids``.
        """
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        pseudo_len = pseudo_ids.size(1)
        assert target_len == pseudo_len
        assert source_len > 0 and target_len > 0
        # Segment lengths consumed incrementally by BertSelfAttention.
        split_lengths = (source_len, target_len, pseudo_len)
        input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id,
             torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        # Target positions continue after the source unless target_no_offset.
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens)
        # Pseudo tokens share the target's position ids.
        position_ids = torch.cat(
            (source_position_ids, target_position_ids, target_position_ids), dim=1)
        if target_span_ids is None:
            target_span_ids = target_position_ids
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_span_ids)
        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)
        sequence_output = outputs[0]
        # Predictions are made at the pseudo positions only.
        pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]

        def loss_mask_and_normalize(loss, mask):
            # Average per-position loss over the non-padding positions.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        prediction_scores_masked = self.cls(pseudo_sequence_output)
        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), label_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), target_mask)
        return pseudo_lm_loss
class TuringNLRv3ForSequenceToSequenceUniLMV1(TuringNLRv3ForSequenceToSequence):
    MODEL_NAME = "TuringNLRv3ForSequenceToSequenceUniLMV1"

    @staticmethod
    def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
        """Seq2seq mask over [source | target]: source positions (weight 0)
        see all non-padding source positions; target positions additionally
        see earlier-or-equal target span ids.
        """
        weight = torch.cat(
            (torch.zeros_like(source_position_ids), target_span_ids), dim=1)
        from_weight = weight.unsqueeze(-1)
        to_weight = weight.unsqueeze(1)
        # Non-padding positions across the concatenated sequence.
        true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1)
        return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask)

    def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens):
        """Masked-LM loss at the explicitly masked target positions."""
        source_len = source_ids.size(1)
        target_len = target_ids.size(1)
        # Segment lengths consumed incrementally by BertSelfAttention.
        split_lengths = (source_len, target_len)
        input_ids = torch.cat((source_ids, target_ids), dim=1)
        token_type_ids = torch.cat(
            (torch.ones_like(source_ids) * self.source_type_id,
             torch.ones_like(target_ids) * self.target_type_id), dim=1)
        source_mask, source_position_ids = \
            create_mask_and_position_ids(num_source_tokens, source_len)
        # Target positions continue after the real source tokens.
        target_mask, target_position_ids = \
            create_mask_and_position_ids(
                num_target_tokens, target_len, offset=num_source_tokens)
        position_ids = torch.cat(
            (source_position_ids, target_position_ids), dim=1)
        attention_mask = self.create_attention_mask(
            source_mask, target_mask, source_position_ids, target_position_ids)
        outputs = self.bert(
            input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, split_lengths=split_lengths)

        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
        sequence_output = outputs[0]
        target_sequence_output = sequence_output[:, source_len:, ]
        masked_sequence_output = gather_seq_out_by_pos(
            target_sequence_output, masked_pos)

        def loss_mask_and_normalize(loss, mask):
            # Average per-position loss over the positions that count.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        prediction_scores_masked = self.cls(masked_sequence_output)
        if self.crit_mask_lm_smoothed:
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_ids)
        pseudo_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weight)
        return pseudo_lm_loss
class TuringNLRv3ForSequenceClassification(TuringNLRv3PreTrainedModel):
    """TNLRv3 encoder with a linear classification/regression head applied
    to the pooled [CLS] representation.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = TuringNLRv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import BertTokenizer, BertForSequenceClassification
        import torch
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
        """
        # NOTE(review): head_mask is accepted but not forwarded (the
        # keyword below is commented out) — confirm this is intentional.
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            # head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pooled [CLS] representation; requires a config with a pooler
        # (i.e. not a TuringNLRv3ForSeq2SeqConfig).
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # add hidden states and attention if they are here
        outputs = (logits,) + outputs[:]
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(
                    logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        # (loss), logits, last_hidden_state, pooled_output, (hidden_states), (attentions)
        return outputs
| 38,964 | 44.360885 | 146 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/utils.py | from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import array
import collections
import torch.utils.data
from transformers.file_utils import WEIGHTS_NAME
try:
import lmdb
except:
pass
# Filename for optimizer state inside each ckpt-<N> directory; model weights
# use transformers' WEIGHTS_NAME alongside it.
OPTIM_NAME = "optimizer.bin"
logger = logging.getLogger(__name__)
class TrainingExample(object):
    """One (source, target) pair of token ids plus its position in the corpus."""

    def __init__(self, source_ids, target_ids, example_id):
        self.example_id = example_id
        self.source_ids = source_ids
        self.target_ids = target_ids
class Seq2seqDatasetForTuringNLRv3(torch.utils.data.Dataset):
    """Seq2seq fine-tuning dataset that applies masking on the fly.

    Items are drawn cyclically from ``features`` (starting at ``offset``) and
    corrupted according to ``finetuning_method``:

    * ``'v0'`` - LM-style: every target position is a prediction slot
      (``num_max_mask_token`` is forced to ``max_target_len``).
    * ``'v1'`` - a random subset of target positions is masked.
    * ``'v2'`` - pseudo-masked LM: a full parallel "pseudo" target sequence
      is produced alongside the (partially masked) target.
    """

    def __init__(
            self, features, max_source_len, max_target_len,
            vocab_size, cls_id, sep_id, pad_id, mask_id,
            random_prob, keep_prob, offset, num_training_instances,
            finetuning_method='v1', target_mask_prob=-1.0, num_max_mask_token=0,
            source_mask_prob=-1.0,
    ):
        self.features = features
        self.max_source_len = max_source_len
        self.max_target_len = max_target_len
        # Starting index into features; lets training resume mid-epoch.
        self.offset = offset
        if offset > 0:
            logger.info(
                " **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
        self.cls_id = cls_id
        self.sep_id = sep_id
        self.pad_id = pad_id
        # BERT-style corruption: with keep_prob keep the token, with
        # random_prob substitute a random vocab id, otherwise use [MASK].
        self.random_prob = random_prob
        self.keep_prob = keep_prob
        self.mask_id = mask_id
        self.vocab_size = vocab_size
        self.num_training_instances = num_training_instances
        self.target_mask_prob = target_mask_prob
        if finetuning_method == 'v0':
            num_max_mask_token = self.max_target_len
            logger.info("Mask way v0: set num_max_mask_token = %d" %
                        num_max_mask_token)
        self.num_max_mask_token = num_max_mask_token
        self.finetuning_method = finetuning_method
        assert finetuning_method in ('v0', 'v1', 'v2')
        self.source_mask_prob = source_mask_prob

    def __len__(self):
        # The dataset length is the requested number of training instances,
        # not len(features): __getitem__ wraps around the feature list.
        return self.num_training_instances

    def __trunk(self, ids, max_len, append_sep=True):
        """Truncate to max_len, optionally reserving room for / adding [SEP]."""
        if append_sep:
            max_len -= 1
        if len(ids) > max_len:
            ids = ids[:max_len]
        if append_sep:
            ids = ids + [self.sep_id]
        return ids

    def __pad(self, ids, max_len):
        """Right-pad with pad_id up to max_len (ids must not exceed max_len)."""
        if len(ids) < max_len:
            return ids + [self.pad_id] * (max_len - len(ids))
        else:
            assert len(ids) == max_len
            return ids

    def get_masked_token(self, tk_id):
        """Corrupt one token id: keep it, pick a random id, or use [MASK]."""
        p = random.random()
        if p < self.keep_prob:
            return tk_id
        elif p < self.keep_prob + self.random_prob:
            return random.randint(0, self.vocab_size - 1)
        else:
            return self.mask_id

    def __getitem__(self, _idx):
        # Wrap around the feature list so any number of instances is servable.
        idx = (self.offset + _idx) % len(self.features)
        # print("%d get %d" % (_idx, idx))
        feature = self.features[idx]
        # v0 starts the target with [SEP] instead of appending it to source.
        source_ids = self.__trunk([self.cls_id] + feature.source_ids,
                                  self.max_source_len, append_sep=self.finetuning_method != 'v0')
        target_ids = feature.target_ids
        if self.finetuning_method == 'v0':
            target_ids = [self.sep_id] + target_ids
        target_ids = self.__trunk(
            target_ids, self.max_target_len, append_sep=self.finetuning_method != 'v0')
        num_source_tokens = len(source_ids)
        num_target_tokens = len(target_ids)
        # Optionally corrupt source tokens too (never [CLS]/[SEP]).
        if self.source_mask_prob > 0:
            for i in range(num_source_tokens):
                tk_id = source_ids[i]
                if tk_id != self.cls_id and tk_id != self.sep_id:
                    r = random.random()
                    if r < self.source_mask_prob:
                        source_ids[i] = self.get_masked_token(tk_id)
        source_ids = self.__pad(source_ids, self.max_source_len)
        target_ids = self.__pad(target_ids, self.max_target_len)
        if self.finetuning_method == 'v0':
            # Every position predicts the NEXT token ([SEP] at the end).
            masked_pos = []
            masked_ids = []
            masked_weights = []
            for pos in range(num_target_tokens):
                if pos + 1 != num_target_tokens:
                    masked_ids.append(target_ids[pos + 1])
                else:
                    masked_ids.append(self.sep_id)
                masked_pos.append(pos)
                masked_weights.append(1)
                r = random.random()
                if r < self.target_mask_prob and pos > 0:
                    target_ids[pos] = self.get_masked_token(target_ids[pos])
            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v1':
            # Mask a random subset of target positions (at least one).
            masked_pos = list(range(num_target_tokens))
            random.shuffle(masked_pos)
            num_masked_token = \
                min(self.num_max_mask_token, int(
                    self.target_mask_prob * num_target_tokens))
            if num_masked_token <= 0:
                num_masked_token = 1
            masked_pos = masked_pos[:num_masked_token]
            masked_ids = []
            masked_weights = []
            for pos in masked_pos:
                masked_ids.append(target_ids[pos])
                target_ids[pos] = self.get_masked_token(target_ids[pos])
                masked_weights.append(1)
            masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
            masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
            masked_weights = self.__pad(
                masked_weights, self.num_max_mask_token)
            return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
        elif self.finetuning_method == 'v2':
            # Pseudo-masked LM: pseudo_ids is the fully corrupted copy,
            # label_ids the gold tokens; target_ids is corrupted per-position
            # with probability target_mask_prob.
            pseudo_ids = []
            label_ids = []
            for pos in range(num_target_tokens):
                tk_id = target_ids[pos]
                masked_tk_id = self.get_masked_token(tk_id)
                pseudo_ids.append(masked_tk_id)
                label_ids.append(tk_id)
                r = random.random()
                if r < self.target_mask_prob:
                    target_ids[pos] = masked_tk_id
            label_ids = self.__pad(label_ids, self.max_target_len)
            pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
            return source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens
def batch_list_to_batch_tensors(batch):
    """Transpose a list of per-example tuples into a list of batch tensors.

    Tensor fields are stacked along a new leading batch dimension; any other
    field (ints, lists of ints, ...) is converted to a long tensor.
    """
    return [
        torch.stack(field) if isinstance(field[0], torch.Tensor)
        else torch.tensor(field, dtype=torch.long)
        for field in zip(*batch)
    ]
def get_max_epoch_model(output_dir):
    """Return the largest checkpoint index under ``output_dir`` that has BOTH
    model weights and optimizer state saved, or None if no complete pair
    exists (checkpoints live in ``output_dir/ckpt-<N>/``)."""
    def _indices(fname):
        # Set of <N> values for which ckpt-<N>/<fname> exists.
        pattern = os.path.join(output_dir, "ckpt-*/%s" % fname)
        return {int(os.path.dirname(p).split('-')[-1]) for p in glob.glob(pattern)}

    complete = _indices(WEIGHTS_NAME) & _indices(OPTIM_NAME)
    return max(complete) if complete else None
def get_checkpoint_state_dict(output_dir, ckpt):
    """Load model weights and optimizer state for checkpoint ``ckpt``.

    Returns the optimizer state dict with the model weights attached under
    the ``'model'`` key; everything is mapped to CPU.
    """
    ckpt_dir = os.path.join(output_dir, "ckpt-%d" % ckpt)
    weights_path = os.path.join(ckpt_dir, WEIGHTS_NAME)
    logger.info(" ** Recover model checkpoint in %s ** ",
                weights_path)
    model_state = torch.load(weights_path, map_location='cpu')
    state = torch.load(os.path.join(ckpt_dir, OPTIM_NAME), map_location='cpu')
    state['model'] = model_state
    return state
def report_length(length_counter, total_count):
    """Log a histogram of sequence lengths in buckets of 16.

    ``length_counter`` maps length -> count (e.g. a Counter/defaultdict);
    ``total_count`` is the denominator for the cumulative-percentage column.
    Only non-empty buckets are logged.
    """
    max_len = max(length_counter.keys())
    tc = 0
    # range(0, max_len + 1, 16) always reaches the bucket containing max_len;
    # the previous `while a < max_len` loop skipped the final bucket whenever
    # max_len was an exact multiple of 16.
    for a in range(0, max_len + 1, 16):
        # .get() avoids inserting missing keys when the caller passes a
        # defaultdict (plain indexing would mutate it).
        cc = sum(length_counter.get(a + i, 0) for i in range(16))
        tc += cc
        if cc > 0:
            logger.info("%d ~ %d = %d, %.2f%%" %
                        (a, a + 16, cc, (tc * 100.0) / total_count))
def serialize_str(x):
    """Render any value as its ASCII byte representation (for LMDB storage)."""
    return str(x).encode('ascii')
def serialize_array(x, dtype):
    """Pack a list of numbers into raw bytes using array typecode ``dtype``."""
    return array.array(dtype, x).tobytes()
def write_to_lmdb(db, key, value):
    """Put ``key``/``value`` into an LMDB env, growing the map on overflow.

    Retries until the put succeeds; each MapFullError aborts the transaction
    and doubles the environment's map_size before trying again.
    """
    success = False
    while not success:
        txn = db.begin(write=True)
        try:
            txn.put(key, value)
            txn.commit()
            success = True
        except lmdb.MapFullError:
            # The write transaction must be aborted before resizing.
            txn.abort()
            # double the map_size
            curr_limit = db.info()['map_size']
            new_limit = curr_limit*2
            print('>>> Doubling LMDB map size to %sMB ...' %
                  (new_limit >> 20,))
            db.set_mapsize(new_limit)  # double it
def deserialize_str(x):
return x.decode('ascii')
class DocDB(object):
    """Read-only view over an LMDB corpus written by load_and_cache_examples.

    The DB stores per-example id arrays under keys ``b"src_ids_%d"`` /
    ``b"tgt_ids_%d"`` plus ``__start__``/``__size__``/``__dtype__`` metadata.
    """

    def __init__(self, db_path):
        self.db_path = db_path
        # lock/readahead/meminit disabled: read-only, many random lookups.
        self.env = lmdb.open(db_path, readonly=True,
                             lock=False, readahead=False, meminit=False)
        with self.env.begin(write=False) as txn:
            self.start_key_index = int(deserialize_str(txn.get(b'__start__')))
            self.size = int(deserialize_str(txn.get(b'__size__')))
            # array typecode used when the ids were serialized.
            self.dtype = deserialize_str(txn.get(b'__dtype__'))

    def _deserialize_array(self, x):
        # Inverse of serialize_array: raw bytes -> list of ints.
        data = array.array(self.dtype)
        data.frombytes(x)
        return data.tolist()

    def __getitem__(self, doc_id):
        """Fetch one TrainingExample by integer id (example_id is not stored)."""
        with self.env.begin(write=False) as txn:
            # example = {
            #     "source_ids": self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)),
            #     "target_ids": self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)),
            # }
            example = TrainingExample(
                source_ids=self._deserialize_array(
                    txn.get(b"src_ids_%d" % doc_id)),
                target_ids=self._deserialize_array(
                    txn.get(b"tgt_ids_%d" % doc_id)),
                example_id=None,
            )
        return example

    def __len__(self):
        return self.size
def load_and_cache_examples(
        example_file, tokenizer, local_rank, cached_features_file, shuffle=True,
        lmdb_cache=None, lmdb_dtype='h', eval_mode=False):
    """Tokenize a jsonl seq2seq file into TrainingExamples, with caching.

    Each line of ``example_file`` is a JSON object with "src" and "tgt"
    fields (either raw strings or pre-tokenized lists). Results are cached
    either as a torch pickle or, when ``lmdb_cache`` is set, as an LMDB dir
    (then a DocDB path/handle is returned instead of an in-memory list).
    In distributed mode, rank 0 builds the cache while other ranks wait at
    the barrier, then read it. With ``eval_mode`` targets are left empty.
    """
    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()

    if cached_features_file is not None and os.path.isfile(cached_features_file):
        logger.info("Loading features from cached file %s",
                    cached_features_file)
        features = torch.load(cached_features_file)
    elif cached_features_file is not None and os.path.isdir(cached_features_file) \
            and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')):
        logger.info("Loading features from cached LMDB %s",
                    cached_features_file)
        features = DocDB(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", example_file)
        examples = []
        with open(example_file, mode="r", encoding="utf-8") as reader:
            for line in reader:
                examples.append(json.loads(line))
        features = []

        # Per-length counters used only for the histogram report below.
        slc = collections.defaultdict(int)
        tlc = collections.defaultdict(int)
        for example in tqdm.tqdm(examples):
            if isinstance(example["src"], list):
                # Already tokenized upstream.
                source_tokens = example["src"]
                target_tokens = [] if eval_mode else example["tgt"]
            else:
                source_tokens = tokenizer.tokenize(example["src"])
                target_tokens = [] if eval_mode else tokenizer.tokenize(
                    example["tgt"])
            source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
            target_ids = tokenizer.convert_tokens_to_ids(target_tokens)

            slc[len(source_ids)] += 1
            tlc[len(target_ids)] += 1

            features.append(
                TrainingExample(
                    source_ids=source_ids,
                    target_ids=target_ids,
                    example_id=len(features),
                )
            )

        if shuffle:
            random.shuffle(features)
            logger.info("Shuffle the features !")

        logger.info("Source length:")
        report_length(slc, total_count=len(examples))
        logger.info("Target length:")
        report_length(tlc, total_count=len(examples))

        if local_rank in [-1, 0] and cached_features_file is not None:
            if lmdb_cache:
                # Persist to LMDB and hand back the path; DocDB will reload it.
                db = lmdb.open(cached_features_file,
                               readonly=False, map_async=True)

                for idx, feature in enumerate(features):
                    write_to_lmdb(
                        db, b"src_ids_%d" % idx,
                        serialize_array(feature.source_ids, dtype=lmdb_dtype))
                    write_to_lmdb(
                        db, b"tgt_ids_%d" % idx,
                        serialize_array(feature.target_ids, dtype=lmdb_dtype))

                write_to_lmdb(db, b"__start__", serialize_str(0))
                write_to_lmdb(db, b"__size__", serialize_str(len(features)))
                write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype))
                db.sync()
                db.close()
                logger.info("db_key_idx = %d" % len(features))
                del features
                features = cached_features_file
                logger.info("Saving features into cached lmdb dir %s",
                            cached_features_file)
            else:
                logger.info("Saving features into cached file %s",
                            cached_features_file)
                torch.save(features, cached_features_file)

    # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if local_rank == 0:
        torch.distributed.barrier()

    return features
| 14,533 | 35.888325 | 119 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/modeling_decoding.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
                 reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        # Smoothing mass is spread over all classes except the gold label and
        # ignore_index, hence the "- 2" denominator.
        smoothing_value = label_smoothing / (tgt_vocab_size - 2)
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.ignore_index] = 0
        # Registered as a buffer so it follows the module's device/dtype.
        self.register_buffer('one_hot', one_hot.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
            Expected to contain log-probabilities (kl_div convention).
        target (LongTensor): batch_size * num_pos

        Returns per-position KL divergence summed over the vocabulary,
        shape (batch_size, num_pos).
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        output = output.view(-1, self.tgt_vocab_size)
        target = target.view(-1)
        # Smoothed target distribution: smoothing everywhere, full confidence
        # on the gold class, all-zero rows for ignored positions.
        model_prob = self.one_hot.repeat(target.size(0), 1)
        model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)

        return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
from transformers import WEIGHTS_NAME
def gelu(x):
    """Exact (erf-based) GELU activation: x * Phi(x).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    cdf_twice = 1.0 + torch.erf(x / math.sqrt(2.0))
    return x * 0.5 * cdf_twice
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# Maps config.hidden_act strings to the corresponding activation callables.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 relax_projection=0,
                 new_pos_ids=False,
                 initializer_range=0.02,
                 task_idx=None,
                 fp32_embedding=False,
                 ffn_type=0,
                 label_smoothing=None,
                 num_qkv=0,
                 seg_emb=False,
                 source_type_id=0,
                 target_type_id=1,
                 rel_pos_bins=0,
                 max_rel_pos=0, **kwargs):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.
            relax_projection: when > 1, multiplies the LM-head projection width
                to allow several task-specific projections.
            ffn_type: 0 for the standard BERT feed-forward; 1/2 select the
                gated TransformerFFN variants.
            num_qkv: when > 1, learn several Q/K/V projections selectable per
                token via mask_qkv.
            seg_emb: add segment-aware attention bias (see BertSelfAttention).
            rel_pos_bins / max_rel_pos: relative-position bias configuration.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field verbatim from a JSON config file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.relax_projection = relax_projection
            self.new_pos_ids = new_pos_ids
            self.initializer_range = initializer_range
            self.task_idx = task_idx
            self.fp32_embedding = fp32_embedding
            self.ffn_type = ffn_type
            self.label_smoothing = label_smoothing
            self.num_qkv = num_qkv
            self.seg_emb = seg_emb
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
            self.max_rel_pos = max_rel_pos
            self.rel_pos_bins = rel_pos_bins
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
# Prefer apex's fused LayerNorm when available; otherwise fall back to a
# pure-PyTorch implementation with the same (TF-style) epsilon placement.
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-5):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last dimension, then scale and shift.
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class PositionalEmbedding(nn.Module):
    """Transformer-XL style sinusoidal positional embedding."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Frequencies 1 / 10000^(2i/d) for the even dimensions; buffered so
        # they follow the module's device.
        freqs = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', freqs)

    def forward(self, pos_seq, bsz=None):
        """Return embeddings of shape (len(pos_seq), 1, demb), expanded to
        (len(pos_seq), bsz, demb) when a batch size is given."""
        angles = torch.ger(pos_seq, self.inv_freq)  # outer product: pos x freq
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)[:, None, :]
        return emb if bsz is None else emb.expand(-1, bsz, -1)
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size)
        if config.type_vocab_size == 0:
            # No segment embeddings for this config.
            self.token_type_embeddings = None
        else:
            self.token_type_embeddings = nn.Embedding(
                config.type_vocab_size, config.hidden_size)
        if hasattr(config, 'fp32_embedding'):
            self.fp32_embedding = config.fp32_embedding
        else:
            self.fp32_embedding = False

        if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
            # Several position-embedding banks, selected per batch by task_idx.
            self.num_pos_emb = 4
        else:
            self.num_pos_emb = 1
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size * self.num_pos_emb)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
        """Sum the word/position/(segment) embeddings, then LayerNorm+dropout.

        position_ids default to 0..seq_len-1; token_type_ids default to zeros.
        """
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)

        if self.num_pos_emb > 1:
            # Pick the per-task slice out of the widened position embedding.
            num_batch = position_embeddings.size(0)
            num_pos = position_embeddings.size(1)
            position_embeddings = position_embeddings.view(
                num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]

        embeddings = words_embeddings + position_embeddings

        if self.token_type_embeddings is not None:
            embeddings = embeddings + self.token_type_embeddings(token_type_ids)

        if self.fp32_embedding:
            # NOTE(review): despite the name, this casts the sum to half
            # precision here — confirm intent against the training setup.
            embeddings = embeddings.half()
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head self-attention with optional multi-QKV banks, segment-aware
    bias, relative-position bias, and key/value caching for decoding."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
            # Multiple Q/K/V projection banks, selected per token via mask_qkv.
            self.num_qkv = config.num_qkv
        else:
            self.num_qkv = 1

        self.query = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)
        self.key = nn.Linear(config.hidden_size,
                             self.all_head_size * self.num_qkv)
        self.value = nn.Linear(
            config.hidden_size, self.all_head_size * self.num_qkv)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        # Debug aid: accumulate mean attention maps when UNI_DEBUG_FLAG is set.
        self.uni_debug_flag = True if os.getenv(
            'UNI_DEBUG_FLAG', '') else False
        if self.uni_debug_flag:
            self.register_buffer('debug_attention_probs',
                                 torch.zeros((512, 512)))
        if hasattr(config, 'seg_emb') and config.seg_emb:
            # Segment-aware attention bias (query-side offset + per-segment key).
            self.b_q_s = nn.Parameter(torch.zeros(
                1, self.num_attention_heads, 1, self.attention_head_size))
            self.seg_emb = nn.Embedding(
                config.type_vocab_size, self.all_head_size)
        else:
            self.b_q_s = None
            self.seg_emb = None

    def transpose_for_scores(self, x, mask_qkv=None):
        """Reshape (batch, pos, hidden) -> (batch, head, pos, head_hid),
        selecting one of the num_qkv projection banks when applicable."""
        if self.num_qkv > 1:
            sz = x.size()[:-1] + (self.num_qkv,
                                  self.num_attention_heads, self.all_head_size)
            # (batch, pos, num_qkv, head, head_hid)
            x = x.view(*sz)
            if mask_qkv is None:
                x = x[:, :, 0, :, :]
            elif isinstance(mask_qkv, int):
                x = x[:, :, mask_qkv, :, :]
            else:
                # mask_qkv: (batch, pos)
                if mask_qkv.size(1) > sz[1]:
                    mask_qkv = mask_qkv[:, :sz[1]]
                # -> x: (batch, pos, head, head_hid)
                x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
                    sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
        else:
            sz = x.size()[:-1] + (self.num_attention_heads,
                                  self.attention_head_size)
            # (batch, pos, head, head_hid)
            x = x.view(*sz)
        # (batch, head, pos, head_hid)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
                key_cache=None, value_cache=None, rel_pos=None,
                ):
        """Attend over hidden_states (plus optional history/cached K,V).

        history_states are prepended to the key/value inputs (incremental
        decoding); key_cache/value_cache lists accumulate raw projections;
        key_history/value_history lists accumulate transposed per-layer K,V.
        """
        if history_states is None:
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(hidden_states, self.key.weight)
            mixed_value_layer = self.value(hidden_states)
        else:
            # Keys/values see history + current step; queries only current.
            x_states = torch.cat((history_states, hidden_states), dim=1)
            mixed_query_layer = self.query(hidden_states)
            # possible issue: https://github.com/NVIDIA/apex/issues/131
            mixed_key_layer = F.linear(x_states, self.key.weight)
            mixed_value_layer = self.value(x_states)

        if key_cache is not None and isinstance(key_cache, list):
            key_cache.append(mixed_key_layer)
            mixed_key_layer = torch.cat(key_cache, dim=1)

        if value_cache is not None and isinstance(value_cache, list):
            value_cache.append(mixed_value_layer)
            mixed_value_layer = torch.cat(value_cache, dim=1)

        query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
        key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
        value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)

        if key_history is not None and not isinstance(key_history, list):
            key_layer = torch.cat((key_history, key_layer), dim=-2)
            value_layer = torch.cat((value_history, value_layer), dim=-2)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch, head, pos, pos)
        attention_scores = torch.matmul(
            query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))

        if rel_pos is not None:
            # Pre-computed relative-position bias, added to the raw scores.
            attention_scores = attention_scores + rel_pos

        if self.seg_emb is not None:
            seg_rep = self.seg_emb(seg_ids)
            # (batch, pos, head, head_hid)
            seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
                1), self.num_attention_heads, self.attention_head_size)
            qs = torch.einsum('bnih,bjnh->bnij',
                              query_layer + self.b_q_s, seg_rep)
            attention_scores = attention_scores + qs

        # attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if self.uni_debug_flag:
            _pos = attention_probs.size(-1)
            self.debug_attention_probs[:_pos, :_pos].copy_(
                attention_probs[0].mean(0).view(_pos, _pos))

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        if isinstance(key_history, list):
            key_history.append(key_layer)
        if isinstance(value_history, list):
            value_history.append(value_layer)

        return context_layer
class BertSelfOutput(nn.Module):
    """Projects the attention output, then dropout + residual LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sub-layer: self-attention followed by the residual output block."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        attended = self.self(
            input_tensor, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Feed-forward expansion (hidden -> intermediate) plus activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        # config.hidden_act may be a name or a callable.
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Contracts the FFN back to hidden size, then dropout + residual LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class TransformerFFN(nn.Module):
    """Gated feed-forward variant, selected by ``config.ffn_type`` (1 or 2).

    Type 1 gates the raw input with one projection (wx0(x) * x); type 2
    multiplies two independent projections (wx0(x) * wx1(x)). The product is
    projected, dropped out, and added back through a residual LayerNorm.
    """

    def __init__(self, config):
        super(TransformerFFN, self).__init__()
        self.ffn_type = config.ffn_type
        assert self.ffn_type in (1, 2)
        if self.ffn_type in (1, 2):
            self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (2,):
            self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
        if self.ffn_type in (1, 2):
            self.output = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, x):
        gate = self.wx0(x)
        other = x if self.ffn_type == 1 else self.wx1(x)
        out = self.dropout(self.output(gate * other))
        return self.LayerNorm(out + x)
class BertLayer(nn.Module):
    """One transformer block: attention followed by a feed-forward sub-layer.

    With a truthy config.ffn_type the gated TransformerFFN replaces the
    standard intermediate/output pair.
    """

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.ffn_type = config.ffn_type
        if self.ffn_type:
            self.ffn = TransformerFFN(config)
        else:
            self.intermediate = BertIntermediate(config)
            self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None,
                mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        attention_output = self.attention(
            hidden_states, attention_mask, history_states=history_states,
            mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history,
            value_history=value_history, rel_pos=rel_pos)
        if self.ffn_type:
            return self.ffn(attention_output)
        return self.output(self.intermediate(attention_output), attention_output)
class BertEncoder(nn.Module):
    """Stack of BertLayers with optional per-layer history for decoding."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
                seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        """Run all layers; returns a list of hidden states (all layers, or
        just the last when output_all_encoded_layers is False)."""
        # history embedding and encoded layer must be simultanously given
        assert (prev_embedding is None) == (prev_encoded_layers is None)

        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            # Incremental decoding: layer i attends over its own previous
            # states (embedding for layer 0, prev layer outputs afterwards).
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states,
                    mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            for i, layer_module in enumerate(self.layer):
                set_key = None
                if isinstance(key_history, list):
                    # Fill phase (len < num layers): pass the list so the layer
                    # appends its K; afterwards pass this layer's cached entry.
                    set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
                set_value = None
                if isinstance(value_history, list):
                    # NOTE(review): the fill-phase check reuses len(key_history)
                    # instead of len(value_history); the two lists appear to be
                    # kept in lockstep, but confirm this is intentional.
                    set_value = value_history if len(key_history) < len(self.layer) else value_history[i]
                hidden_states = layer_module(
                    hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
                    key_history=set_key, value_history=set_value, rel_pos=rel_pos)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
hid_size = config.hidden_size
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
hid_size *= config.relax_projection
self.dense = nn.Linear(config.hidden_size, hid_size)
self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
    """Masked-LM output head: transform + weight-tied decoder over the vocab.

    The decoder weight is tied to the input word-embedding matrix; only a
    per-token output bias is learned here.
    """

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(
            bert_model_embedding_weights.size(0)))
        if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
            self.relax_projection = config.relax_projection
        else:
            self.relax_projection = 0
        self.fp32_embedding = config.fp32_embedding

        # NOTE(review): despite the flag name, when fp32_embedding is set the
        # converter casts activations to HALF precision (the embedding itself
        # presumably stays fp32 elsewhere) -- confirm against the
        # mixed-precision training setup.
        def convert_to_type(tensor):
            if self.fp32_embedding:
                return tensor.half()
            else:
                return tensor
        self.type_converter = convert_to_type
        self.converted = False

    def forward(self, hidden_states, task_idx=None):
        # Lazily cast the transform sub-module to half precision on first use.
        if not self.converted:
            self.converted = True
            if self.fp32_embedding:
                self.transform.half()
        hidden_states = self.transform(self.type_converter(hidden_states))
        if self.relax_projection > 1:
            num_batch = hidden_states.size(0)
            num_pos = hidden_states.size(1)
            # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
            hidden_states = hidden_states.view(
                num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
        if self.fp32_embedding:
            # Apply the tied decoder with all operands cast through the
            # converter so dtypes stay consistent.
            hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
                self.decoder.weight), self.type_converter(self.bias))
        else:
            hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class BertOnlyMLMHead(nn.Module):
    """Pre-training head exposing only the masked-LM prediction branch."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        # Per-position vocabulary logits.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
    """Combined masked-LM and sentence-relationship pre-training heads."""

    def __init__(self, config, bert_model_embedding_weights, num_labels=2):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(
            config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, num_labels)

    def forward(self, sequence_output, pooled_output, task_idx=None):
        prediction_scores = self.predictions(sequence_output, task_idx)
        # Decoder-only callers pass pooled_output=None and get no NSP score.
        seq_relationship_score = (
            None if pooled_output is None
            else self.seq_relationship(pooled_output))
        return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # LayerNorm starts as the identity: zero bias, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        logger.info("Model config {}".format(config))
        # Strip configuration-only keyword arguments before they reach the
        # model class constructor.
        for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
                          'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
                          'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
                          'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
            if arg_clean in kwargs:
                del kwargs[arg_clean]

        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
            state_dict = torch.load(weights_path)

        # Rename parameters saved under TF naming (old checkpoints):
        # gamma/beta correspond to LayerNorm weight/bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load parameters, mirroring nn.Module.load_state_dict
            # but tolerating missing/unexpected keys (collected above).
            local_metadata = {} if metadata is None else metadata.get(
                prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Checkpoints may or may not carry the leading 'bert.' prefix.
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        model.missing_keys = missing_keys
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            logger.info('\n'.join(error_msgs))
        return model
class BertModel(PreTrainedBertModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.

    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    """

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        self.apply(self.init_bert_weights)

    def rescale_some_parameters(self):
        # Depth-dependent rescaling of the attention / FFN output projections
        # (deeper layers get scaled down more strongly).
        for layer_id, layer in enumerate(self.encoder.layer):
            layer.attention.output.dense.weight.data.div_(
                math.sqrt(2.0 * (layer_id + 1)))
            layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))

    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Broadcast a 2D/3D attention mask to 4D and convert it to additive
        form (0.0 for positions to attend, -10000.0 for masked positions)."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)

        embedding_output = self.embeddings(
            input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      mask_qkv=mask_qkv, seg_ids=token_type_ids,
                                      key_history=key_history, value_history=value_history)
        # Pooling is always done on the last layer's [CLS] position.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertModelIncr(BertModel):
    """BertModel variant supporting incremental (step-wise) decoding.

    Optionally adds a learned relative-position attention bias and, unlike
    the base class, returns the embedding output as well so callers can
    cache history states between decoding steps.
    """

    def __init__(self, config):
        super(BertModelIncr, self).__init__(config)
        if self.config.rel_pos_bins > 0:
            self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
        else:
            self.rel_pos_bias = None

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids, task_idx=task_idx)
        if self.rel_pos_bias is not None:
            # Bucketized relative positions -> one-hot -> per-head additive
            # bias, permuted to (batch, heads, seq, seq).
            rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
            rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        else:
            rel_pos = None
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      prev_embedding=prev_embedding,
                                      prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
                                      seg_ids=token_type_ids, rel_pos=rel_pos)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
    """BERT model with pre-training heads.
    This module comprises the BERT model followed by the two pre-training heads:
        - the masked language modeling head, and
        - the next sentence classification head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.

    Outputs:
        if `masked_lm_labels` and `next_sentence_label` are not `None`:
            Outputs the total_loss which is the sum of the masked language modeling loss and the next
            sentence classification loss.
        if `masked_lm_labels` or `next_sentence_label` is `None`:
            Outputs a tuple comprising
            - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
            - the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The LM head's decoder is weight-tied to the input word embeddings.
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None, mask_qkv=None, task_idx=None):
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False, mask_qkv=mask_qkv,
                                                   task_idx=task_idx)
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output)

        if masked_lm_labels is not None and next_sentence_label is not None:
            # Positions labeled -1 are excluded from the MLM loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(
                prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(
                seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            return total_loss
        else:
            return prediction_scores, seq_relationship_score
class BertPreTrainingPairTransform(nn.Module):
def __init__(self, config):
super(BertPreTrainingPairTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
# self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
def forward(self, pair_x, pair_y):
hidden_states = torch.cat([pair_x, pair_y], dim=-1)
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
# hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map signed relative positions to bucket indices.

    Nearby offsets get one bucket per position; offsets beyond
    ``num_buckets // 2`` positions share logarithmically sized buckets up to
    ``max_distance``. With ``bidirectional``, the sign of the offset selects
    one of the two halves of the bucket range.

    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    """
    bucket = 0
    if bidirectional:
        num_buckets //= 2
        # Positive offsets land in the upper half of the bucket range.
        bucket += (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # distance is now in [0, inf); small distances map one-to-one to buckets.
    max_exact = num_buckets // 2
    is_small = distance < max_exact
    # Larger distances fall into log-spaced buckets, capped at the last one.
    large_bucket = max_exact + (
        torch.log(distance.float() / max_exact) / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    large_bucket = torch.min(
        large_bucket, torch.full_like(large_bucket, num_buckets - 1))
    bucket += torch.where(is_small, distance, large_bucket)
    return bucket
class BertForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
pos_shift=False):
super(BertForSeq2SeqDecoder, self).__init__(config)
self.bert = BertModelIncr(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.sos_id = sos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
assert mode in ("s2s", "l2r")
self.mode = mode
self.pos_shift = pos_shift
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
if self.search_beam_size > 1:
return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
task_idx=task_idx, mask_qkv=mask_qkv)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
_, max_ids = torch.max(prediction_scores, dim=-1)
output_ids.append(max_ids)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = new_embedding
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
else:
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1)
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
# print("Rel pos size = %s" % str(rel_pos.size()))
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos - input_length + 1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.div(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
if rel_pos is not None:
rel_pos = first_expand(rel_pos)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
        def _pad_sequence(sequences, max_len, padding_value=0):
            """Right-pad a list of tensors along dim 0 into one stacked tensor.

            Each tensor in `sequences` must share its trailing dimensions; the
            output has shape (len(sequences), max_len, *trailing_dims) with the
            same dtype/device as the first tensor (via `.data.new`).
            """
            trailing_dims = sequences[0].size()[1:]
            out_dims = (len(sequences), max_len) + trailing_dims
            # Pre-fill the whole buffer with the padding value, then copy each
            # sequence over its prefix.
            out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
            for i, tensor in enumerate(sequences):
                length = tensor.size(0)
                # use index notation to prevent duplicate references to the tensor
                out_tensor[i, :length, ...] = tensor
            return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
| 67,538 | 44.944898 | 139 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/config.py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers import BertConfig
from tnlrv3.configuration_tnlrv3 import TuringNLRv3Config
logger = logging.getLogger(__name__)
class TuringNLRv3ForSeq2SeqConfig(BertConfig):
    """Configuration for TuringNLRv3 sequence-to-sequence fine-tuning.

    Extends the base BERT configuration with label smoothing, source/target
    segment ids, relative-position settings, and a flag that freezes the word
    embedding table.
    """

    def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1,
                 rel_pos_bins=0, max_rel_pos=0, fix_word_embedding=False, **kwargs):
        # All standard BERT options are forwarded to the base class unchanged.
        super(TuringNLRv3ForSeq2SeqConfig, self).__init__(**kwargs)
        self.label_smoothing = label_smoothing
        self.source_type_id = source_type_id
        self.target_type_id = target_type_id
        self.max_rel_pos = max_rel_pos
        self.rel_pos_bins = rel_pos_bins
        self.fix_word_embedding = fix_word_embedding

    @classmethod
    def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None, fix_word_embedding=False):
        """Build a seq2seq config by copying fields from an existing config.

        Mandatory encoder fields are asserted present; the optional seq2seq
        fields are copied only when the source config defines them. The
        positional table may be grown (never shrunk) via
        `max_position_embeddings`.
        """
        required_keys = [
            "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
            "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
            "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps",
        ]
        # Fail loudly if the source config is missing any mandatory field.
        for field in required_keys:
            assert hasattr(config, field)
        kwargs = {field: getattr(config, field) for field in required_keys}
        # Alias kept for compatibility with older constructor signatures.
        kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
        optional_keys = [
            "source_type_id", "target_type_id", "rel_pos_bins", "max_rel_pos",
        ]
        kwargs.update({field: getattr(config, field)
                       for field in optional_keys if hasattr(config, field)})
        if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
            # Only ever enlarge the positional embedding table.
            kwargs["max_position_embeddings"] = max_position_embeddings
            logger.info(" ** Change max position embeddings to %d ** " %
                        max_position_embeddings)
        return cls(label_smoothing=label_smoothing, fix_word_embedding=fix_word_embedding, **kwargs)
| 2,104 | 41.959184 | 116 | py |
Tiny-NewsRec | Tiny-NewsRec-main/PLM-NR/tnlrv3/configuration_tnlrv3.py | # coding=utf-8
""" TuringNLRv3 model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
TuringNLRv3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
}
class TuringNLRv3Config(PretrainedConfig):
    r"""
    :class:`~transformers.TuringNLRv3Config` is the configuration class to store the configuration of a
    `TuringNLRv3Model`.

    Arguments:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `TuringNLRv3Model`.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `TuringNLRv3Model`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
        source_type_id: Segment id assigned to source-side tokens.
        target_type_id: Segment id assigned to target-side tokens.
    """
    # Map of shortcut names to downloadable configs (empty for this model).
    pretrained_config_archive_map = TuringNLRv3_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size=28996,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=6,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 source_type_id=0,
                 target_type_id=1,
                 **kwargs):
        super(TuringNLRv3Config, self).__init__(**kwargs)
        # Legacy dual-purpose first argument: a string is treated as a path to
        # a JSON config file, an int as the vocabulary size.
        # NOTE(review): `unicode` only exists on Python 2; the version check
        # short-circuits first, so the `and` clause never evaluates on Python 3.
        if isinstance(vocab_size, str) or (sys.version_info[0] == 2
                and isinstance(vocab_size, unicode)):
            with open(vocab_size, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            # Copy every field from the JSON file directly onto this config.
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size, int):
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
| 4,136 | 45.483146 | 107 | py |
cz_corpus | cz_corpus-master/Evaluator.py | # -*- coding: utf-8 -*-
__author__ = 'svobik'
from gensim.models.word2vec import Word2Vec
from gensim import corpora, models, similarities, matutils
import re
import os
import logging
import optparse
import numpy as np
import operator
import codecs
from fnmatch import fnmatch
NUM_SEMANTIC_CLASSES = 6
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def cosine_vector_similarity(vec_a, vec_b):
    """Return the cosine similarity between two vectors."""
    dot_product = np.dot(vec_a, vec_b)
    magnitude_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return dot_product / magnitude_product
def result_vector(st1, st2, st3, model):
    """Return the analogy vector vec(st2) - vec(st1) + vec(st3)."""
    def word_vec(word):
        # Look the word up in the model's normalised embedding matrix.
        return model.syn0norm[model.vocab[word].index]
    vec_a = word_vec(st1)
    vec_b = word_vec(st2)
    vec_c = word_vec(st3)
    difference = map(operator.sub, vec_b, vec_a)
    return map(operator.add, difference, vec_c)
def most_similar_to_vec(vector,model,topn, list_words):
    """Return the `topn` vocabulary words most similar to `vector`.

    Similarity is the dot product against `model.syn0norm`; assumes the rows
    are L2-normalised (gensim convention) so this equals cosine similarity --
    TODO confirm. Words listed in `list_words` (the analogy question words)
    are excluded. Returns a list of (word, similarity) pairs, best first.
    """
    dists = np.dot(model.syn0norm, vector)
    # Over-fetch by len(list_words) so that `topn` results remain after the
    # input words are filtered out below.
    best = matutils.argsort(dists, topn=topn + len(list_words), reverse=True)
    # ignore (don't return) words from the input
    result = [(model.index2word[sim], float(dists[sim])) for sim in best if model.index2word[sim] not in list_words]
    return result[:topn]
def evaluate_file(filePath, topN, outputFile):
    """Evaluate word-analogy questions from `filePath` against the global `model`.

    Python 2 code (print statements, `except KeyError,e`). Lines starting with
    ':' open a new question category; the first NUM_SEMANTIC_CLASSES
    categories are counted as semantic, the rest as syntactic. Per-category
    and total TOP-N accuracies are printed and written to a ".resN.txt" file
    derived from `outputFile`; out-of-vocabulary words go to an "err.log".
    Always returns 0.

    NOTE(review): `accuracyCosMul` is initialised and averaged but never
    incremented, `wordErr` is assigned but unused, and the name `list` shadows
    the builtin inside the loop.
    """
    accuracy = 0.0
    accuracyCosMul = 0.0
    accuracyAll = 0.0
    accuracyAllCosMul = 0.0
    classItemsCount = 0
    notSeenCounter = 0
    questionsCount =0
    classNumb = 0
    listAccSemantic = []
    listAccSynt= []
    # Results file name is derived by replacing the model file's extension.
    fw = codecs.open(outputFile[:-4]+".res"+str(topN)+".txt", 'w','utf-8' )
    prevCategory = ": Antonyms-nouns"
    fwerr = codecs.open(outputFile[:-4]+"err.log", 'w', 'utf-8')
    listErr= []
    with codecs.open(filePath, 'r','utf-8') as f:
        for line in f:
            if (line.strip()[0]==':'):
                # Category header: flush accuracy of the previous category.
                if classItemsCount!=0:
                    currAcc= (accuracy/classItemsCount)*100.0
                    currAccCosMul= (accuracyCosMul/classItemsCount)*100.0
                    # Early categories are semantic, later ones syntactic.
                    if classNumb< NUM_SEMANTIC_CLASSES:
                        listAccSemantic.append(currAcc)
                    else :
                        listAccSynt.append(currAcc)
                    print(prevCategory + " > accuracy TOP%d = %f (%d/%d)\n" % (topN,currAcc, accuracy,classItemsCount))
                    fw.write(prevCategory.encode('utf-8') + " > accuracy TOP%d = %f (%d/%d) \n" % (topN,currAcc, accuracy,classItemsCount))
                prevCategory = line
                classNumb = classNumb + 1
                print line
                accuracy = 0.0
                accuracyCosMul = 0.0
                classItemsCount = 0
            else:
                # Question line: "w1 w2 w3 w4" -- predict w4 from w1:w2 :: w3:?
                tokens = line.lower().strip().split(" ")
                questionsCount = questionsCount + 1.0
                classItemsCount = classItemsCount + 1.0
                try:
                    # NOTE(review): `model` is a module-level global set in __main__.
                    list = most_similar_to_vec(result_vector(tokens[0], tokens[1], tokens[2], model),model,topN,tokens[:-1])
                    for item in list:
                        match = item[0]
                        #match = match.encode('utf-8')
                        if match == tokens[3]:
                            #print "Correct item=%s" % (item[0])
                            accuracy =accuracy + 1.0
                            accuracyAll =accuracyAll + 1.0
                except KeyError,e:
                    # A question word is out of vocabulary; log and count it.
                    logging.error(e)
                    wordErr = str(e).encode("utf-8")
                    notSeenCounter = notSeenCounter + 1.0
                    listErr.append(e)
    # Flush the last (syntactic) category after the file ends.
    if classItemsCount!=0:
        currAcc= (accuracy/classItemsCount)*100.0
        currAccCosMul= (accuracyCosMul/classItemsCount)*100.0
        listAccSynt.append(currAcc)
        print(prevCategory + " > accuracy TOP%d = %f (%d/%d)\n" % (topN,currAcc, accuracy,classItemsCount))
        fw.write(prevCategory.encode('utf-8') + " > accuracy TOP%d = %f (%d/%d)\n" % (topN,currAcc, accuracy,classItemsCount))
    # Macro-average over the semantic categories.
    avgVal = 0.0
    count= 0.0
    for val in listAccSemantic:
        avgVal = avgVal +val
        count = count + 1.0
    semanticAcc = avgVal / count
    # Macro-average over the syntactic categories.
    avgVal = 0.0
    count= 0.0
    for val in listAccSynt:
        avgVal = avgVal +val
        count = count + 1.0
    syntacticAcc = avgVal / count
    print "Total accuracy TOP%d = %f \n" % (topN,(accuracyAll/questionsCount)*100.0)
    fw.write("Total accuracy TOP%d = %f \n" % (topN,(accuracyAll/questionsCount)*100.0))
    fw.write("Semantic accuracy TOP%d = %f \n" % (topN,semanticAcc))
    fw.write("Syntactic accuracy TOP%d = %f \n" % (topN,syntacticAcc))
    #print "Total accuracy CosMul TOP%d = %f" % (topN,(accuracyAllCosMul/questionsCount)*100.0)
    #fw.write("Total accuracy CosMul TOP%d = %d", (topN,(accuracyAllCosMul/questionsCount)*100.0))
    # Fraction of questions whose words were all in the vocabulary.
    print "Seen= %f" % (((questionsCount-notSeenCounter)/questionsCount) * 100.0)
    fw.write("Seen= %f"% (((questionsCount-notSeenCounter)/questionsCount) * 100.0))
    # Write each distinct out-of-vocabulary word once.
    for word in np.unique(listErr):
        fwerr.write(str(word)+"\n")
    fw.close()
    fwerr.close()
    return 0
if __name__ == '__main__':
    # Command-line entry point: load a word2vec model and run the analogy
    # evaluation over the chosen corpus file.
    parser = optparse.OptionParser(usage="%prog [OPTIONS]")
    parser.add_option('-m', '--model', default='./models/vectors_cz_cbow_dim300_w10_phrase.txt',
                      help='Give a path with the name of a model to load (default name= vector.txt)')
    parser.add_option('-c', '--corpus', default='./corpus/czech_emb_corpus.txt',
                      help='Give a name of corpus to analyze (default: ./corpus/czech_emb_corpus.txt)')
    parser.add_option('-t', '--topn', default='1',
                      help='TOP N similar words')
    options, args = parser.parse_args()
    # NOTE: `model` must stay module-level -- evaluate_file reads it as a global.
    model = Word2Vec.load_word2vec_format(options.model,binary=False)
    # The model path doubles as the prefix for the result/err output files.
    evaluate_file(options.corpus,int(options.topn), options.model)
| 5,830 | 34.993827 | 139 | py |
DDOD | DDOD-main/setup.py | #!/usr/bin/env python
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Read README.md and return its full text for the long description."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
version_file = 'mmdet/version.py'


def get_version():
    """Read ``__version__`` from the version file without importing mmdet.

    Executes the file into an explicit namespace dict. The previous
    implementation exec'd into the function's ``locals()``, which the Python
    language reference does not guarantee to reflect names assigned by
    ``exec`` inside a function.

    Returns:
        str: the version string declared in ``mmdet/version.py``.
    """
    version_ns = {}
    with open(version_file, 'r') as f:
        # Compile with the real filename so tracebacks point at version.py.
        exec(compile(f.read(), version_file, 'exec'), version_ns)
    return version_ns['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=None):
    """Build a CUDA or C++ extension description for setuptools.

    Args:
        name (str): extension name inside ``module``.
        module (str): dotted package path containing the source files.
        sources (list[str]): C++ sources, relative to ``module``.
        sources_cuda (list[str] | None): extra CUDA sources, compiled only
            when CUDA is available or ``FORCE_CUDA=1``. Defaults to none.

    Returns:
        setuptools.Extension: a ``CUDAExtension`` when CUDA is usable,
        otherwise a ``CppExtension``.
    """
    # Copy the inputs: the previous version used a mutable default argument
    # (`sources_cuda=[]`) and mutated the caller's `sources` list via `+=`.
    sources = list(sources)
    sources_cuda = [] if sources_cuda is None else list(sources_cuda)
    define_macros = []
    extra_compile_args = {'cxx': []}

    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension
    return extension(
        name=f'{module}.{name}',
        # Prefix each source path with the package directory.
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def handle_line(raw):
        """Yield info dicts parsed from one requirements line."""
        if raw.startswith('-r '):
            # Recurse into an included requirements file.
            yield from read_requires(raw.split(' ')[1])
            return
        entry = {'line': raw}
        if raw.startswith('-e '):
            entry['package'] = raw.split('#egg=')[1]
        elif '@git+' in raw:
            entry['package'] = raw
        else:
            # Split the package name off its version operator.
            pattern = '(' + '|'.join(['>=', '==', '>']) + ')'
            pieces = [p.strip() for p in re.split(pattern, raw, maxsplit=1)]
            entry['package'] = pieces[0]
            if len(pieces) > 1:
                op, remainder = pieces[1:]
                if ';' in remainder:
                    # Handle platform specific dependencies
                    # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                    version, platform_deps = map(str.strip,
                                                 remainder.split(';'))
                    entry['platform_deps'] = platform_deps
                else:
                    version = remainder  # NOQA
                entry['version'] = (op, version)
        yield entry

    def read_requires(fpath):
        """Yield parsed entries for every non-comment line of `fpath`."""
        with open(fpath, 'r') as req_file:
            for raw in req_file.readlines():
                raw = raw.strip()
                if raw and not raw.startswith('#'):
                    yield from handle_line(raw)

    def emit_items():
        """Yield one requirement string per parsed entry."""
        if not exists(require_fpath):
            return
        for entry in read_requires(require_fpath):
            fragments = [entry['package']]
            if with_version and 'version' in entry:
                fragments.extend(entry['version'])
            if not sys.version.startswith('3.4'):
                # apparently package_deps are broken in 3.4
                platform_deps = entry.get('platform_deps')
                if platform_deps is not None:
                    fragments.append(';' + platform_deps)
            yield ''.join(fragments)

    return list(emit_items())
if __name__ == '__main__':
    # Package metadata and dependency lists for mmdet; version and long
    # description are read from the working tree at build time.
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='OpenMMLab',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        # Dependencies are split across per-purpose requirements files.
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        # Compiled extensions are added elsewhere; BuildExtension handles them.
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 5,899 | 35.196319 | 125 | py |
DDOD | DDOD-main/coco_cfg/atss_r50_1x.py | fp16 = dict(loss_scale=512.)
# model settings: ATSS detector, ResNet-50 backbone with FPN neck,
# single anchor per location over 5 pyramid levels.
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + first stage
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='data/pretrain_models/resnet50-0676ba61.pth')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSIoUHead',
        num_classes=80,  # COCO classes
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: 1x schedule -- 12 epochs, lr drops at epochs 8 and 11
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 4,154 | 29.777778 | 99 | py |
DDOD | DDOD-main/coco_cfg/ddod_r50_1x.py | fp16 = dict(loss_scale=512.)
# model settings: same ATSS framework as atss_r50_1x.py but with the DDOD
# head and a cost-based assigner, plus a separate assigner for the
# regression branch.
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + first stage
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='data/pretrain_models/resnet50-0676ba61.pth')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='DDODHead',
        num_classes=80,  # COCO classes
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        # Classification and regression branches get their own assigners.
        assigner=dict(type='ATSSCostAssigner', topk=9),
        reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: 1x schedule -- 12 epochs, lr drops at epochs 8 and 11
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 4,226 | 30.080882 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.