Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
evo | evo-master/fastentrypoints.py | # Copyright (c) 2016, Aaron Christianson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
'''
from setuptools.command import easy_install
import re
TEMPLATE = '''\
# -*- coding: utf-8 -*-
# EASY-INSTALL-ENTRY-SCRIPT: '{3}','{4}','{5}'
__requires__ = '{3}'
import re
import sys
from {0} import {1}
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit({2}())'''
@classmethod
def get_args(cls, dist, header=None):
    """
    Yield write_script() argument tuples for a distribution's
    console_scripts and gui_scripts entry points.

    Drop-in replacement for setuptools' ScriptWriter.get_args that
    renders the lightweight TEMPLATE above instead of the slow
    pkg_resources-based launcher. Defined with @classmethod at module
    level because it is assigned onto easy_install.ScriptWriter.

    :param dist: distribution object whose entry points are written
    :param header: optional script header (shebang line); defaults to
        cls.get_header()
    """
    if header is None:
        header = cls.get_header()
    spec = str(dist.as_requirement())
    for type_ in 'console', 'gui':
        group = type_ + '_scripts'
        for name, ep in dist.get_entry_map(group).items():
            # ensure_safe_name: script names become file names, so path
            # separators must be rejected.
            if re.search(r'[\\/]', name):
                raise ValueError("Path separators not allowed in script names")
            script_text = TEMPLATE.format(
                ep.module_name, ep.attrs[0], '.'.join(ep.attrs),
                spec, group, name)
            args = cls._get_script_args(type_, name, header, script_text)
            for res in args:
                yield res
easy_install.ScriptWriter.get_args = get_args
def main():
    """Install fastentrypoints.py into one or more target projects.

    For each destination directory given on the command line (default:
    the current directory), this:
      1. copies this file there,
      2. ensures MANIFEST.in includes it (creating the file if needed),
      3. ensures setup.py imports it, prepending the import so the
         monkey patch is active before setup() runs.
    """
    import os
    import re
    import shutil
    import sys
    dests = sys.argv[1:] or ['.']
    # __file__ may point at cached bytecode; copy the .py source instead.
    filename = re.sub(r'\.pyc$', '.py', __file__)
    for dst in dests:
        shutil.copy(filename, dst)
        manifest_path = os.path.join(dst, 'MANIFEST.in')
        setup_path = os.path.join(dst, 'setup.py')
        # Insert the include statement to MANIFEST.in if not present.
        # Mode 'a+' creates the file if missing; seek(0) lets us read
        # the existing content first.
        with open(manifest_path, 'a+') as manifest:
            manifest.seek(0)
            manifest_content = manifest.read()
            if 'include fastentrypoints.py' not in manifest_content:
                manifest.write(('\n' if manifest_content else '')
                               + 'include fastentrypoints.py')
        # Insert the import statement to setup.py if not present.
        with open(setup_path, 'a+') as setup:
            setup.seek(0)
            setup_content = setup.read()
            if 'import fastentrypoints' not in setup_content:
                setup.seek(0)
                setup.truncate()
                setup.write('import fastentrypoints\n' + setup_content)
    print(__name__)
| 3,950 | 34.594595 | 79 | py |
evo | evo-master/setup.py | from setuptools import setup, Command
from setuptools.command.install import install
import os
import sys
import shutil
import subprocess as sp
# monkey patch because setuptools entry_points are slow as fuck
# https://github.com/ninjaaron/fast-entry_points
import fastentrypoints # pylint: disable=unused-import
HERE = os.path.abspath(os.path.dirname(__file__))
def activate_argcomplete():
    """Enable global shell completion for argcomplete-based CLIs.

    No-op on Windows, where argcomplete's global activation script is
    not available. Failures are reported to stderr but not raised.
    """
    if os.name == "nt":
        return
    print("Activating argcomplete...")
    try:
        sp.check_call("activate-global-python-argcomplete", shell=True)
        print("Done - argcomplete should work now.")
    except sp.CalledProcessError as e:
        # check_call() does not capture output, so e.output is always
        # None; print the exception itself (command + return code).
        print("Error:", e, file=sys.stderr)
def _post_install(install_lib_dir):
    # Post-install hook; install_lib_dir is unused but required by the
    # setuptools cmd.execute() callback signature used in CustomInstall.
    activate_argcomplete()
class CustomInstall(install):
    # Extends the default install command to run a post-install step
    # (argcomplete activation) after the regular installation.
    def run(self):
        install.run(self)
        # self.execute() respects --dry-run and logs the given message.
        self.execute(_post_install, (self.install_lib, ),
                     msg="Running post install task of evo...")
# cmd: python setup.py upload
class UploadCommand(Command):
    """Build a source distribution and publish it to PyPI via twine."""
    description = "Build and publish the package."
    user_options = []  # no command line options

    def initialize_options(self):
        # Required override of the abstract Command interface; no-op.
        pass

    def finalize_options(self):
        # Required override of the abstract Command interface; no-op.
        pass

    def run(self):
        try:
            print("Removing previous dist/ ...")
            shutil.rmtree(os.path.join(HERE, "dist"))
        except OSError:
            # dist/ does not exist yet - nothing to clean up.
            pass
        print("Building source distribution...")
        sp.check_call([sys.executable, "setup.py", "sdist"])
        print("Uploading package to PyPi...")
        sp.check_call(["twine", "upload", "dist/*"])
        sys.exit()
# yapf: disable
setup(
    name="evo",
    # Single source of truth for the version: the evo/version file.
    # NOTE(review): open() without close leaks a handle during setup;
    # harmless for a short-lived build process.
    version=open("evo/version").read(),
    description="Python package for the evaluation of odometry and SLAM",
    author="Michael Grupp",
    author_email="michael.grupp@tum.de",
    url="https://github.com/MichaelGrupp/evo",
    license="GPLv3",
    # README.md is rendered as the PyPI project description.
    long_description=open(os.path.join(HERE, "README.md")).read(),
    long_description_content_type="text/markdown",
    keywords=[
        "SLAM", "odometry", "trajectory", "evaluation", "metric",
        "vision", "laser", "visual", "robotics"
    ],
    packages=["evo", "evo.core", "evo.tools"],
    # Non-Python files that must ship inside the installed package.
    package_data={"evo": ["version", "LICENSE"]},
    entry_points={"console_scripts": [
        "evo_ape=evo.entry_points:ape",
        "evo_rpe=evo.entry_points:rpe",
        "evo_traj=evo.entry_points:traj",
        "evo_res=evo.entry_points:res",
        "evo_config=evo.main_config:main",
        "evo_fig=evo.main_fig:main",
        "evo_ipython=evo.main_ipython:main",
        "evo=evo.main_evo:main"
    ]},
    zip_safe=False,
    # Custom commands: post-install hook and 'python setup.py upload'.
    cmdclass={
        "install": CustomInstall,
        "upload": UploadCommand
    },
    install_requires=[
        "numpy>=1.18.5",
        "matplotlib",
        "scipy>=1.2",
        "pandas",
        "numexpr>=2.7.3",
        "seaborn>=0.9",
        "natsort",
        "argcomplete",
        "colorama>=0.3",
        "pygments",
        "pyyaml",
        "pillow",
        "rosbags>=0.9.10",
    ],
    python_requires=">=3.8",
    classifiers=[
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Topic :: Scientific/Engineering",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: Implementation :: CPython"
    ]
)
# yapf: enable
| 3,576 | 28.081301 | 75 | py |
evo | evo-master/.ci/debian_install_pip3.sh | #!/bin/bash
set -e
apt update
apt install -y python3-pip
| 59 | 7.571429 | 26 | sh |
evo | evo-master/.ci/ros_entrypoint.sh | #!/bin/bash
set -e
source "/opt/ros/$ROS_DISTRO/setup.bash"
exec "$@"
| 72 | 9.428571 | 40 | sh |
evo | evo-master/.ci/ros_run_tests.sh | #!/bin/bash
set -e
workdir=$1
source /opt/ros/$ROS_DISTRO/setup.sh
cd $workdir
pytest -sv
| 93 | 8.4 | 36 | sh |
evo | evo-master/.ci/run_yapf.sh | #!/bin/bash
set -e
if [ ! -f setup.py ]; then
echo "Error: please execute it in the base directory of the repository."
exit 1
fi
# Exclude 3rd party files.
yapf --recursive --in-place -vv . \
--exclude "fastentrypoints.py" \
--exclude "evo/core/transformations.py" \
--exclude "test/tum_benchmark_tools/*" \
| 322 | 19.1875 | 74 | sh |
evo | evo-master/.github/stale.yaml | # Number of days of inactivity before an issue becomes stale
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Only issues or pull requests with all of these labels are check if stale.
onlyLabels:
- more info needed
- wontfix
- invalid
- duplicate
- installation
- dependencies
- question
- data
# Issues with these labels will never be considered stale
exemptLabels:
- bug
- security
- pinned
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false
| 893 | 29.827586 | 77 | yaml |
evo | evo-master/.github/ISSUE_TEMPLATE/error-report.md | ---
name: Error report
about: Fill this out and upload your data (!!)
---
**Description:**
**Command:**
```
# replace this line with the command(s) you used
```
**Console output:**
```
# remove this line and paste your console output HERE - no screenshots please
```
**Additional files:**
Please attach all the files needed to reproduce the error.
Please give also the following information:
* evo version number shown by `evo pkg --version`:
* Python version shown by `evo pkg --pyversion`:
* operating system and version (e.g. Ubuntu 16.04 or Windows 10):
* did you change the source code? (yes / no):
* output of `evo_config show --brief --no_color`:
```
# remove this line and paste your config HERE
```
| 724 | 19.138889 | 77 | md |
evo | evo-master/.github/ISSUE_TEMPLATE/feature_request.md | ---
name: Feature request
about: Suggest an idea for this project
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| 560 | 30.166667 | 92 | md |
evo | evo-master/.github/ISSUE_TEMPLATE/question-about-a-concept.md | ---
name: Question about a concept
about: Ask a question about theory, API, etc.
---
| 88 | 10.125 | 45 | md |
evo | evo-master/contrib/README.md | # contrib
Stuff that is not part of the package distribution, but can be useful in special cases. | 98 | 32 | 87 | md |
evo | evo-master/contrib/kitti_poses_and_timestamps_to_trajectory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from evo.core.trajectory import PoseTrajectory3D
from evo.tools import file_interface
import numpy as np
DESC = "Combine KITTI poses and timestamps files to a TUM trajectory file"
def kitti_poses_and_timestamps_to_trajectory(poses_file, timestamp_file):
    """Combine a KITTI poses file and a timestamps file into a trajectory.

    :param poses_file: path of the KITTI poses file (one pose per row)
    :param timestamp_file: path of a file with one timestamp per row,
        same number of rows as the poses file
    :return: trajectory.PoseTrajectory3D with the poses and timestamps
    :raises: file_interface.FileInterfaceException if the timestamp file
        does not match the poses file
    """
    pose_path = file_interface.read_kitti_poses_file(poses_file)
    raw_timestamps_mat = file_interface.csv_read_matrix(timestamp_file)
    error_msg = ("timestamp file must have one column of timestamps and same "
                 "number of rows as the KITTI poses file")
    # Parentheses make the intended and/or precedence explicit
    # (identical to Python's default: 'and' binds tighter than 'or').
    if ((len(raw_timestamps_mat) > 0 and len(raw_timestamps_mat[0]) != 1)
            or len(raw_timestamps_mat) != pose_path.num_poses):
        raise file_interface.FileInterfaceException(error_msg)
    try:
        timestamps_mat = np.array(raw_timestamps_mat).astype(float)
    except ValueError as e:
        # Chain the original parsing error for easier debugging.
        raise file_interface.FileInterfaceException(error_msg) from e
    return PoseTrajectory3D(poses_se3=pose_path.poses_se3,
                            timestamps=timestamps_mat)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("poses_file", help="pose path file in KITTI format")
parser.add_argument(
"timestamp_file", help="KITTI timestamp file of the poses")
parser.add_argument(
"trajectory_out", help="output file path for trajectory in TUM format")
args = parser.parse_args()
trajectory = kitti_poses_and_timestamps_to_trajectory(
args.poses_file, args.timestamp_file)
file_interface.write_tum_trajectory_file(args.trajectory_out, trajectory)
| 1,586 | 43.083333 | 121 | py |
evo | evo-master/contrib/multiply_timestamps.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from evo.tools import file_interface
DESC = """multiply the timestamps of a TUM trajectory file by a factor"""
def main(traj_file, factor):
    """Scale all timestamps of a TUM trajectory file in place."""
    trajectory = file_interface.read_tum_trajectory_file(traj_file)
    trajectory.timestamps = trajectory.timestamps * factor
    file_interface.write_tum_trajectory_file(traj_file, trajectory)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("traj_file", help="trajectory in TUM format")
parser.add_argument("factor", help="factor", type=float)
args = parser.parse_args()
main(args.traj_file, args.factor)
| 666 | 28 | 73 | py |
evo | evo-master/contrib/print_duplicate_timestamps.sh | #!/bin/bash
set -e
usage="
Print lines with duplicate timestamps in TUM or EuRoC trajectory files.\n\n
Usage: ./print_duplicates.sh TRAJECTORY
"
if [ "$#" -ne 1 ]; then
echo -e $usage
exit 1
fi
cut -d" " -f 1 $1 | uniq -D
| 235 | 12.882353 | 75 | sh |
evo | evo-master/contrib/record_tf_as_posestamped_bag.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import os
import rosbag
import rospy
import tf2_ros
from geometry_msgs.msg import PoseStamped
DESC = """Record a tf frame's trajectory to a geometry_msgs/PoseStamped bag."""
class Recorder(object):
    """Looks up a tf transform periodically and records it as
    geometry_msgs/PoseStamped messages into a rosbag."""

    def __init__(self, parent_frame, child_frame, lookup_frequency, bagfile,
                 output_topic, append):
        # Frames of the transform to look up (parent -> child).
        self.parent_frame = parent_frame
        self.child_frame = child_frame
        self.bagfile = bagfile
        # The listener subscribes to tf and fills the buffer in the
        # background; the buffer is queried in run().
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.lookup_frequency = lookup_frequency
        self.output_topic = output_topic
        self.append = append

    def run(self):
        """Record transforms until ROS shutdown, then close the bag."""
        msg_count = 0
        try:
            # 'a' appends to an existing bag, 'w' starts a fresh one.
            bag = rosbag.Bag(self.bagfile, mode='a' if self.append else 'w')
            rate = rospy.Rate(self.lookup_frequency)
            last_stamp = rospy.Time()
            while not rospy.is_shutdown():
                try:
                    transform = self.tf_buffer.lookup_transform(
                        self.parent_frame, self.child_frame, rospy.Time())
                    rate.sleep()
                except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                        tf2_ros.ExtrapolationException):
                    # Transform not (yet) available - retry at same rate.
                    rate.sleep()
                    continue
                # Skip duplicates: the lookup can return a cached
                # transform with an unchanged timestamp.
                if last_stamp == transform.header.stamp:
                    continue
                pose = transformstamped_to_posestamped(transform)
                bag.write(self.output_topic, pose, t=pose.header.stamp)
                msg_count += 1
                last_stamp = transform.header.stamp
                # Progress log at most every 10 seconds.
                rospy.loginfo_throttle(
                    10, "Recorded {} PoseStamped messages.".format(msg_count))
        except rospy.ROSInterruptException:
            # Normal shutdown (Ctrl-C / node shutdown) - not an error.
            pass
        finally:
            bag.close()
            rospy.loginfo("Finished recording.")
def transformstamped_to_posestamped(transform_stamped):
    """Convert a geometry_msgs/TransformStamped to a PoseStamped.

    The translation maps to the position, the rotation to the
    orientation; the header (frame id + stamp) is carried over as-is.
    """
    pose_stamped = PoseStamped()
    pose_stamped.header = transform_stamped.header
    pose_stamped.pose.position = transform_stamped.transform.translation
    pose_stamped.pose.orientation = transform_stamped.transform.rotation
    return pose_stamped
def timestamp_str():
    """Return the current local time formatted as YYYY-mm-dd-HH-MM-SS."""
    # strftime() already returns a str; the extra str() wrapper of the
    # original implementation was redundant.
    return datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
def main(parent_frame, child_frame, lookup_frequency, bagfile, output_topic,
         append):
    # Initialize the ROS node and run the recorder until shutdown.
    rospy.init_node("record_tf_as_posestamped_bag")
    recorder = Recorder(parent_frame, child_frame, lookup_frequency, bagfile,
                        output_topic, append)
    recorder.run()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("parent_frame")
parser.add_argument("child_frame")
parser.add_argument(
"--lookup_frequency", help="maximum frequency at which transforms "
"are looked up", default=100.0, type=float)
parser.add_argument("--output_topic", help="name of the output topic",
default=None)
parser.add_argument(
"--bagfile", help="output bagfile path", default=os.path.join(
os.getcwd(),
timestamp_str() + ".bag"))
parser.add_argument("--append", action="store_true",
help="whether to append to an existing bagfile")
args = parser.parse_args()
if args.output_topic is None:
output_topic = args.child_frame
else:
output_topic = args.output_topic
main(args.parent_frame, args.child_frame, args.lookup_frequency,
args.bagfile, output_topic, args.append)
| 4,348 | 34.357724 | 79 | py |
evo | evo-master/contrib/rename_est_name.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from evo.tools import file_interface
DESC = """rename the 'est_name' field in a result file"""
def main(res_file, new_name):
    """Overwrite the 'est_name' info field of an evo result file."""
    loaded_result = file_interface.load_res_file(res_file)
    loaded_result.info["est_name"] = new_name
    file_interface.save_res_file(res_file, loaded_result)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("res_file", help="evo result file")
parser.add_argument("new_name", help="new 'est_name'")
args = parser.parse_args()
main(args.res_file, args.new_name)
| 611 | 25.608696 | 59 | py |
evo | evo-master/doc/alignment_demo.py | #!/usr/bin/env python
"""
test/demo for trajectory alignment functions
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import logging
import sys
import evo.core.lie_algebra as lie
from evo.core import trajectory
from evo.tools import plot, file_interface, log
import numpy as np
import matplotlib.pyplot as plt
logger = logging.getLogger("evo")
log.configure_logging(verbose=True)
traj_ref = file_interface.read_kitti_poses_file("../test/data/KITTI_00_gt.txt")
traj_est = file_interface.read_kitti_poses_file(
"../test/data/KITTI_00_ORB.txt")
# add artificial Sim(3) transformation
traj_est.transform(lie.se3(np.eye(3), np.array([0, 0, 0])))
traj_est.scale(0.5)
logger.info("\nUmeyama alignment without scaling")
traj_est_aligned = copy.deepcopy(traj_est)
traj_est_aligned.align(traj_ref)
logger.info("\nUmeyama alignment with scaling")
traj_est_aligned_scaled = copy.deepcopy(traj_est)
traj_est_aligned_scaled.align(traj_ref, correct_scale=True)
logger.info("\nUmeyama alignment with scaling only")
traj_est_aligned_only_scaled = copy.deepcopy(traj_est)
traj_est_aligned_only_scaled.align(traj_ref, correct_only_scale=True)
fig = plt.figure(figsize=(8, 8))
plot_mode = plot.PlotMode.xz
ax = plot.prepare_axis(fig, plot_mode, subplot_arg=221)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray')
plot.traj(ax, plot_mode, traj_est, '-', 'blue')
fig.axes.append(ax)
plt.title('not aligned')
ax = plot.prepare_axis(fig, plot_mode, subplot_arg=222)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray')
plot.traj(ax, plot_mode, traj_est_aligned, '-', 'blue')
fig.axes.append(ax)
plt.title('$\mathrm{SE}(3)$ alignment')
ax = plot.prepare_axis(fig, plot_mode, subplot_arg=223)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray')
plot.traj(ax, plot_mode, traj_est_aligned_scaled, '-', 'blue')
fig.axes.append(ax)
plt.title('$\mathrm{Sim}(3)$ alignment')
ax = plot.prepare_axis(fig, plot_mode, subplot_arg=224)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray')
plot.traj(ax, plot_mode, traj_est_aligned_only_scaled, '-', 'blue')
fig.axes.append(ax)
plt.title('only scaled')
fig.tight_layout()
plt.show()
| 2,735 | 31.188235 | 79 | py |
evo | evo-master/doc/install_in_virtualenv.md | # Installation in a virtual environment
Virtual environments allow you to install Python packages in an isolated environment.
This is usually a good idea because it reduces the risk that you mess up your system's Python packages by installing globally with `pip`.
Additionally, you can have multiple environments in parallel that don't interfere with each other.
## virtualenv & virtualenvwrapper
`virtualenvwrapper` is highly recommended, it makes using virtual environments much more comfortable.
Below are installation instructions for Ubuntu.
If you use any other OS, see the documentation for how to install it on your system:
* virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/
* virtualenv documentation: https://virtualenv.pypa.io/en/latest/
### virtualenvwrapper installation on Ubuntu
***The following steps have been verified on Ubuntu 20. They probably also work on other Debian-based Linux distros.***
Install `virtualenv` and `virtualenvwrapper`:
```shell
sudo apt install python3-virtualenvwrapper
```
Add setup code for `virtualenvwrapper` to your shell startup file:
```shell
echo "export WORKON_HOME=$HOME/.virtualenvs && source /usr/share/virtualenvwrapper/virtualenvwrapper.sh" >> ~/.bashrc
source ~/.bashrc
```
## Setting up a virtualenv for evo
Once `virtualenvwrapper` is installed, we can create the virtual environment.
The `--system-site-packages` flag is recommended if you are using ROS with evo:
it enables to import ROS Python modules that are installed outside of the virtualenv on your system.
```shell
mkvirtualenv evaluation --system-site-packages
```
To activate the environment, type:
```shell
workon evaluation
```
Install evo and its dependencies inside the virtual environment:
```shell
pip install --ignore-installed evo --no-binary evo
# or alternatively from source:
cd <evo> # go to evo base source folder that contains setup.py
pip install --ignore-installed --editable . --no-binary evo
```
Now, the package should be installed in the virtualenv and you can use it.
Check if evo is installed correctly by running:
```
evo
```
To leave the virtualenv, close the shell or type:
```shell
deactivate
```
(activate again with `workon evaluation`)
To delete the environment:
```shell
rmvirtualenv evaluation
```
## Tab completion (UNIX / Bash)
Unfortunately, tab command completion with the [argcomplete](https://github.com/kislyuk/argcomplete) might not work immediately in a virtual environment. You might need to install argcomplete outside of your virtualenv and run `activate-global-python-argcomplete` to make it work globally on your machine.
| 2,631 | 33.631579 | 305 | md |
evo | evo-master/doc/jupyter_notebook.md | To install Jupyter, call:
```
pip install jupyter
jupyter nbextension enable --py --sys-prefix widgetsnbextension
```
### Local Jupyter notebook access
Go to the `evo` source folder in a terminal and run: `jupyter notebook` (starts server and opens browser window with notebook).
### Remote Jupyter notebook access
Notebook servers can also be accessed via the browser of a remote PC on the local network without installing Jupyter.
**Do once:**
* disable tokens on your **server** side:
* `jupyter notebook --generate-config`
* go to the generated config file, uncomment and change the `c.NotebookApp.token` parameter to an empty string
* **TODO**: enable password authentication without annoying tokens
**Anytime you want to start a server:**
* start the notebook on the **server**: `jupyter notebook --no-browser --port=8888`
* access notebook on **remote** PC:
* establish SSH forwarding: `ssh username@remotehost -L 8889:localhost:8888`
* this forwards remote 8888 port to local 8889 (numbers are just examples)
* open the notebook in a browser: `localhost:8889` | 1,087 | 37.857143 | 127 | md |
evo | evo-master/doc/performance.md | # Performance
Although Python's adavantages are definitely more in the area of flexibility than in in raw performance, evo still tries to work as efficient as possible to keep annoying delays small. This is achieved with efficient algorithms, heavy use of libs like numpy or pandas for handling large data, lazy evaluation patterns, list/dict comprehensions over plain loops etc. - all things that you don't care about as a user but make it faster to work with.
Even if you don't really need all features, you can consider it also as a replacement for simpler scripts.
Here's a comparison with the Python-based evaluation tools from the popular TUM RGB-D dataset ([source](https://vision.in.tum.de/data/datasets/rgbd-dataset/tools), [extended version](https://github.com/raulmur/evaluate_ate_scale)) using a rather large ground truth trajectory with 20957 poses ([data](../test/data)). Small numerical differences are expected.
**Absolute translation error (same settings)**
```
$ time ./evaluate_ate.py fr2_desk_groundtruth.txt fr2_desk_ORB.txt --verbose
compared_pose_pairs 2223 pairs
absolute_translational_error.rmse 0.008144 m
absolute_translational_error.mean 0.007514 m
absolute_translational_error.median 0.007432 m
absolute_translational_error.std 0.003140 m
absolute_translational_error.min 0.000332 m
absolute_translational_error.max 0.024329 m
real 0m16.753s
user 0m16.824s
sys 0m0.204s
```
---
```
$ time evo_ape tum fr2_desk_groundtruth.txt fr2_desk_ORB.txt --align
APE w.r.t. translation part (m)
(with SE(3) Umeyama alignment)
max 0.024300
mean 0.007492
median 0.007415
min 0.000350
rmse 0.008119
sse 0.143305
std 0.003129
real 0m0.735s
user 0m0.764s
sys 0m0.272s
```
The difference is so obvious that further profiling is not really needed. But it shows that `evaluate_ate.py` spends most of its time associating the timestamps of the two trajectories, which is implemented more efficient in evo's sync module.
## Plotting
...makes everything a bit slower unfortunately, mainly because loading matplotlib consumes up to a few seconds. If you want to do a large number of plots, consider coding a custom script that loads matplotlib only once.
| 2,220 | 40.12963 | 447 | md |
evo | evo-master/doc/examples/custom_app.py | #!/usr/bin/env python
print("loading required evo modules")
from evo.core import trajectory, sync, metrics
from evo.tools import file_interface
print("loading trajectories")
traj_ref = file_interface.read_tum_trajectory_file(
"../../test/data/fr2_desk_groundtruth.txt")
traj_est = file_interface.read_tum_trajectory_file(
"../../test/data/fr2_desk_ORB.txt")
print("registering and aligning trajectories")
traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est)
traj_est.align(traj_ref, correct_scale=False)
print("calculating APE")
data = (traj_ref, traj_est)
ape_metric = metrics.APE(metrics.PoseRelation.translation_part)
ape_metric.process_data(data)
ape_statistics = ape_metric.get_all_statistics()
print("mean:", ape_statistics["mean"])
print("loading plot modules")
from evo.tools import plot
import matplotlib.pyplot as plt
print("plotting")
plot_collection = plot.PlotCollection("Example")
# metric values
fig_1 = plt.figure(figsize=(8, 8))
plot.error_array(fig_1.gca(), ape_metric.error, statistics=ape_statistics,
name="APE", title=str(ape_metric))
plot_collection.add_figure("raw", fig_1)
# trajectory colormapped with error
fig_2 = plt.figure(figsize=(8, 8))
plot_mode = plot.PlotMode.xy
ax = plot.prepare_axis(fig_2, plot_mode)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray', 'reference')
plot.traj_colormap(ax, traj_est, ape_metric.error, plot_mode,
min_map=ape_statistics["min"],
max_map=ape_statistics["max"],
title="APE mapped onto trajectory")
plot_collection.add_figure("traj (error)", fig_2)
# trajectory colormapped with speed
fig_3 = plt.figure(figsize=(8, 8))
plot_mode = plot.PlotMode.xy
ax = plot.prepare_axis(fig_3, plot_mode)
speeds = [
trajectory.calc_speed(traj_est.positions_xyz[i],
traj_est.positions_xyz[i + 1],
traj_est.timestamps[i], traj_est.timestamps[i + 1])
for i in range(len(traj_est.positions_xyz) - 1)
]
speeds.append(0)
plot.traj(ax, plot_mode, traj_ref, '--', 'gray', 'reference')
plot.traj_colormap(ax, traj_est, speeds, plot_mode, min_map=min(speeds),
max_map=max(speeds), title="speed mapped onto trajectory")
fig_3.axes.append(ax)
plot_collection.add_figure("traj (speed)", fig_3)
plot_collection.show()
| 2,336 | 34.953846 | 77 | py |
evo | evo-master/evo/__init__.py | import logging
import os
# https://docs.python.org/3/howto/logging.html#library-config
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
PACKAGE_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
__version__ = open(os.path.join(PACKAGE_BASE_PATH,
"version")).read().splitlines()[0]
class EvoException(Exception):
    """Base exception type for errors raised by evo."""

    def __init__(self, *args, **kwargs):
        # Python 3 removed BaseException.message; restore it here for
        # convenience. It mirrors the first positional argument, if any.
        self.message = args[0] if args else ""
        super().__init__(*args, **kwargs)
| 672 | 31.047619 | 76 | py |
evo | evo-master/evo/common_ape_rpe.py | """
Common functions for evo_ape and evo_rpe, internal only.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import typing
from evo.core.metrics import PoseRelation, Unit
from evo.core.result import Result
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
logger = logging.getLogger(__name__)
SEP = "-" * 80 # separator line
def load_trajectories(
        args: argparse.Namespace
) -> typing.Tuple[PosePath3D, PosePath3D, str, str]:
    """
    Load the reference and the estimated trajectory according to the
    file format sub-command in the parsed CLI arguments.

    :param args: parsed arguments (evo_ape / evo_rpe namespace)
    :return: tuple (traj_ref, traj_est, ref_name, est_name)
    :raises: KeyError if args.subcommand is not a known format
    """
    from evo.tools import file_interface
    traj_ref: typing.Union[PosePath3D, PoseTrajectory3D]
    traj_est: typing.Union[PosePath3D, PoseTrajectory3D]
    if args.subcommand == "tum":
        traj_ref = file_interface.read_tum_trajectory_file(args.ref_file)
        traj_est = file_interface.read_tum_trajectory_file(args.est_file)
        ref_name, est_name = args.ref_file, args.est_file
    elif args.subcommand == "kitti":
        traj_ref = file_interface.read_kitti_poses_file(args.ref_file)
        traj_est = file_interface.read_kitti_poses_file(args.est_file)
        ref_name, est_name = args.ref_file, args.est_file
    elif args.subcommand == "euroc":
        # EuRoC ground truth is a state CSV; the estimate is TUM format.
        traj_ref = file_interface.read_euroc_csv_trajectory(args.state_gt_csv)
        traj_est = file_interface.read_tum_trajectory_file(args.est_file)
        ref_name, est_name = args.state_gt_csv, args.est_file
    elif args.subcommand in ("bag", "bag2"):
        import os
        logger.debug("Opening bag file " + args.bag)
        if not os.path.exists(args.bag):
            raise file_interface.FileInterfaceException(
                "File doesn't exist: {}".format(args.bag))
        # Pick the rosbags reader matching the bag format version.
        if args.subcommand == "bag2":
            from rosbags.rosbag2 import Reader as Rosbag2Reader
            bag = Rosbag2Reader(args.bag)  # type: ignore
        else:
            from rosbags.rosbag1 import Reader as Rosbag1Reader
            bag = Rosbag1Reader(args.bag)  # type: ignore
        try:
            bag.open()
            traj_ref = file_interface.read_bag_trajectory(bag, args.ref_topic)
            traj_est = file_interface.read_bag_trajectory(bag, args.est_topic)
            ref_name, est_name = args.ref_topic, args.est_topic
        finally:
            # Always close the bag, even if reading a topic fails.
            bag.close()
    else:
        raise KeyError("unknown sub-command: {}".format(args.subcommand))
    return traj_ref, traj_est, ref_name, est_name
def get_pose_relation(args: argparse.Namespace) -> PoseRelation:
    """
    Resolve the --pose_relation CLI choice string to a PoseRelation value.
    :param args: parsed argparse namespace with a pose_relation attribute
    :return: the corresponding PoseRelation enum member
    :raises KeyError: if args.pose_relation is not a known choice
    """
    # Dispatch table instead of an if/elif chain. The previous version left
    # the result variable unbound for unknown values, which surfaced as a
    # confusing UnboundLocalError; now an explicit KeyError names the value.
    relations = {
        "full": PoseRelation.full_transformation,
        "rot_part": PoseRelation.rotation_part,
        "trans_part": PoseRelation.translation_part,
        "angle_deg": PoseRelation.rotation_angle_deg,
        "angle_rad": PoseRelation.rotation_angle_rad,
        "point_distance": PoseRelation.point_distance,
        "point_distance_error_ratio": PoseRelation.point_distance_error_ratio,
    }
    try:
        return relations[args.pose_relation]
    except KeyError:
        raise KeyError(
            "unknown pose relation: {}".format(args.pose_relation)) from None
def get_delta_unit(args: argparse.Namespace) -> Unit:
    """
    Map the single-letter --delta_unit CLI choice to a Unit enum value.
    :param args: parsed argparse namespace with a delta_unit attribute
    :return: the corresponding Unit (Unit.none for unrecognized letters)
    """
    # Lookup table with the same Unit.none fallback as before.
    return {
        "f": Unit.frames,
        "d": Unit.degrees,
        "r": Unit.radians,
        "m": Unit.meters,
    }.get(args.delta_unit, Unit.none)
def plot_result(args: argparse.Namespace, result: Result, traj_ref: PosePath3D,
                traj_est: PosePath3D,
                traj_ref_full: typing.Optional[PosePath3D] = None) -> None:
    """
    Show, save and/or serialize the plots of a metric result,
    depending on the plot options in the CLI args.
    :param args: parsed argparse namespace of a metrics app subcommand
    :param result: result object of the evaluated metric
    :param traj_ref: reference trajectory (synced to traj_est)
    :param traj_est: estimated trajectory that was evaluated
    :param traj_ref_full: optional full reference trajectory to draw instead
                          of traj_ref in the color-map plot background
    """
    from evo.tools import plot
    from evo.tools.settings import SETTINGS

    import matplotlib.pyplot as plt
    import numpy as np

    logger.debug(SEP)
    logger.debug("Plotting results... ")
    plot_mode = plot.PlotMode(args.plot_mode)

    # Plot the raw metric values.
    fig1 = plt.figure(figsize=SETTINGS.plot_figsize)
    # Choose the x-axis data: distances or seconds if requested and present
    # in the result, otherwise fall back to the pose index.
    if (args.plot_x_dimension == "distances"
            and "distances_from_start" in result.np_arrays):
        x_array = result.np_arrays["distances_from_start"]
        x_label = "$d$ (m)"
    elif (args.plot_x_dimension == "seconds"
          and "seconds_from_start" in result.np_arrays):
        x_array = result.np_arrays["seconds_from_start"]
        x_label = "$t$ (s)"
    else:
        x_array = None
        x_label = "index"
    plot.error_array(
        fig1.gca(), result.np_arrays["error_array"], x_array=x_array,
        statistics={
            s: result.stats[s]
            for s in SETTINGS.plot_statistics if s not in ("min", "max")
        }, name=result.info["label"], title=result.info["title"],
        xlabel=x_label)

    # Plot the values color-mapped onto the trajectory.
    fig2 = plt.figure(figsize=SETTINGS.plot_figsize)
    ax = plot.prepare_axis(fig2, plot_mode)
    # Fix: test against None explicitly instead of relying on truthiness of
    # the trajectory object, matching the Optional[...] annotation.
    plot.traj(ax, plot_mode,
              traj_ref_full if traj_ref_full is not None else traj_ref,
              style=SETTINGS.plot_reference_linestyle,
              color=SETTINGS.plot_reference_color, label='reference',
              alpha=SETTINGS.plot_reference_alpha,
              plot_start_end_markers=SETTINGS.plot_start_end_markers)
    plot.draw_coordinate_axes(ax, traj_ref, plot_mode,
                              SETTINGS.plot_reference_axis_marker_scale)
    # Default the color map bounds to the error min/max when not given,
    # optionally capping the upper bound at a percentile of the errors.
    if args.plot_colormap_min is None:
        args.plot_colormap_min = result.stats["min"]
    if args.plot_colormap_max is None:
        args.plot_colormap_max = result.stats["max"]
    if args.plot_colormap_max_percentile is not None:
        args.plot_colormap_max = np.percentile(
            result.np_arrays["error_array"], args.plot_colormap_max_percentile)
    plot.traj_colormap(ax, traj_est, result.np_arrays["error_array"],
                       plot_mode, min_map=args.plot_colormap_min,
                       max_map=args.plot_colormap_max,
                       title=result.info["title"],
                       plot_start_end_markers=SETTINGS.plot_start_end_markers)
    plot.draw_coordinate_axes(ax, traj_est, plot_mode,
                              SETTINGS.plot_axis_marker_scale)
    if args.ros_map_yaml:
        plot.ros_map(ax, args.ros_map_yaml, plot_mode)
    if SETTINGS.plot_pose_correspondences:
        plot.draw_correspondence_edges(
            ax, traj_est, traj_ref, plot_mode,
            style=SETTINGS.plot_pose_correspondences_linestyle,
            color=SETTINGS.plot_reference_color,
            alpha=SETTINGS.plot_reference_alpha)
    fig2.axes.append(ax)

    plot_collection = plot.PlotCollection(result.info["title"])
    plot_collection.add_figure("raw", fig1)
    plot_collection.add_figure("map", fig2)
    if args.plot:
        plot_collection.show()
    if args.save_plot:
        plot_collection.export(args.save_plot,
                               confirm_overwrite=not args.no_warnings)
    if args.serialize_plot:
        logger.debug(SEP)
        plot_collection.serialize(args.serialize_plot,
                                  confirm_overwrite=not args.no_warnings)
    plot_collection.close()
evo | evo-master/evo/entry_points.py | # -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
separate entry points into pieces to allow common error handling and faster argcomplete
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import sys
import typing
import argcomplete
from evo import EvoException, NullHandler
logger = logging.getLogger(__name__)
KNOWN_EXCEPTIONS = (EvoException, FileNotFoundError)
"""
the actual entry points:
to save time for argcomplete (tab bash completion),
only do required imports in respective module when creating parser
(no expensive global imports)
"""
def ape() -> None:
    """Entry point of the evo_ape app."""
    from evo import main_ape
    arg_parser = main_ape.parser()
    argcomplete.autocomplete(arg_parser)
    launch(main_ape, arg_parser)
def rpe() -> None:
    """Entry point of the evo_rpe app."""
    from evo import main_rpe
    arg_parser = main_rpe.parser()
    argcomplete.autocomplete(arg_parser)
    launch(main_rpe, arg_parser)
def res() -> None:
    """Entry point of the evo_res app."""
    from evo import main_res
    arg_parser = main_res.parser()
    argcomplete.autocomplete(arg_parser)
    launch(main_res, arg_parser)
def traj() -> None:
    """Entry point of the evo_traj app."""
    from evo import main_traj
    arg_parser = main_traj.parser()
    argcomplete.autocomplete(arg_parser)
    launch(main_traj, arg_parser)
def merge_config(args: argparse.Namespace) -> argparse.Namespace:
    """
    merge .json config file with the command line args (if --config was defined)
    :param args: parsed argparse NameSpace object
    :return: merged argparse NameSpace object (the input object unchanged if
             no config file was given)
    """
    import json
    if not args.config:
        # Nothing to merge, hand the namespace back untouched.
        return args
    # Use an explicit encoding so a config with non-ASCII content parses the
    # same on every platform, and let json.load read the file in one step.
    with open(args.config, encoding="utf-8") as config_file:
        config_dict = json.load(config_file)
    # Merge both parameter dicts; config file values win over CLI values.
    merged_config_dict = vars(args).copy()
    merged_config_dict.update(config_dict)
    # override args the hacky way
    args = argparse.Namespace(**merged_config_dict)
    # Override global settings for this session
    # if the config file contains matching keys.
    from evo.tools.settings import SETTINGS
    SETTINGS.update_existing_keys(other=config_dict)
    return args
def launch(main_module, parser: argparse.ArgumentParser) -> None:
    """
    Parse the CLI arguments and run an evo main module with shared
    error handling around it.
    :param main_module: an evo main module exposing run(args)
                        (e.g. evo.main_ape)
    :param parser: the argument parser created by that module
    """
    args = parser.parse_args()
    # Merge a JSON config file into the args if this app supports --config.
    if hasattr(args, "config"):
        args = merge_config(args)
    try:
        main_module.run(args)
    except KeyboardInterrupt:
        sys.exit(1)
    except SystemExit as e:
        # Forward the exit code the module intended.
        sys.exit(e.code)
    except KNOWN_EXCEPTIONS as e:
        # Expected error types: short log message, no traceback.
        logger.error(str(e))
        sys.exit(1)
    except Exception:
        base_logger = logging.getLogger("evo")
        if len(base_logger.handlers) == 0 or isinstance(
                base_logger.handlers[0], NullHandler):
            # In case logging couldn't be configured, print & exit directly.
            import traceback
            traceback.print_exc()
            sys.exit(1)
        # Logging is configured: record the full traceback and point the
        # user to the global logfile if one is being written.
        logger.exception("Unhandled error in " + main_module.__name__)
        print("")
        err_msg = "evo module " + main_module.__name__ + " crashed"
        from evo.tools import settings
        if settings.SETTINGS.global_logfile_enabled:
            err_msg += " - see " + settings.GLOBAL_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        # Offer to open the logfile interactively unless warnings are off.
        if not args.no_warnings:
            if settings.SETTINGS.global_logfile_enabled and user.confirm(
                    "Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.GLOBAL_LOGFILE_PATH)
        sys.exit(1)
| 4,125 | 30.496183 | 87 | py |
evo | evo-master/evo/ipython_config.py | from colorama import Fore, Style
# Configuration file for ipython.
# get_config() is injected into this file's namespace by IPython when it
# loads the profile configuration; it is not a regular import.
c = get_config()  # type: ignore
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
# yapf: disable
# Code run at IPython startup: pre-import the evo core/tools modules, the
# high-level metric entry points and the common scientific Python stack so
# they are immediately available in the interactive session.
c.InteractiveShellApp.exec_lines = [
    'from evo.core import lie_algebra, metrics, result, sync, trajectory',
    'from evo.tools import file_interface, pandas_bridge, plot, settings',
    'from evo.main_ape import ape',
    'from evo.main_rpe import rpe',
    'import matplotlib.pyplot as plt',
    'import numpy as np',
    'import seaborn as sns',
    'import pandas as pd'
]
# yapf: enable
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = u''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = u''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# Use the standard default profile name; this file only customizes startup
# code and banners, not a separate profile directory.
c.BaseIPythonApplication.profile = u'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# Replace IPython's banners: banner1 keeps the stock IPython help text,
# banner2 lists what this profile pre-loads, colorized with colorama codes
# substituted via str.format below.
c.InteractiveShell.banner1 = 'Type "copyright", "credits" or "license" for more information.\n\nIPython -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
## The part of the banner to be printed after the profile
c.InteractiveShell.banner2 = '''
{bright}{green}Welcome to the evo IPython profile!{fr}{sr}
Pre-loaded modules from {bright}evo.core:{blue}
lie_algebra
metrics
result
sync
trajectory
{fr}{sr}
Pre-loaded modules from {bright}evo.tools:{blue}
file_interface
pandas_bridge
plot
{fr}{sr}
Pre-loaded high-level functions for metrics:{blue}
ape(...)
rpe(...)
{fr}
Others: numpy as np, matplotlib.pyplot as plt, pandas as pd, seaborn as sns
'''.format(fr=Fore.RESET, sr=Style.RESET_ALL, bright=Style.BRIGHT,
           blue=Fore.BLUE, green=Fore.GREEN)
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
#c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
#c.TerminalInteractiveShell.editor = 'vi'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# manni, igor, lovelace, xcode, vim, autumn, abap, vs, rrt, native, perldoc, borland, arduino, tango, emacs, friendly, monokai, paraiso-dark, colorful, murphy, bw, pastie, rainbow_dash, algol_nu, paraiso-light, trac, default, algol, fruity
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of line at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database conenctions.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# end but not the back ticks), to avoid creating an history file.
#c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Activate greedy completion PENDING DEPRECTION. this is now mostly taken care
# of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
| 23,177 | 37.184514 | 353 | py |
evo | evo-master/evo/main_ape.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
Main executable for calculating the absolute pose error (APE) metric.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import numpy as np
import evo.common_ape_rpe as common
from evo.core import lie_algebra, sync, metrics
from evo.core.result import Result
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
from evo.tools import file_interface, log
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
SEP = "-" * 80 # separator line
def parser() -> argparse.ArgumentParser:
    """Build the evo_ape argument parser.

    Options shared by all input formats live in a hidden parent parser;
    one sub-command is registered per trajectory format
    (kitti/tum/euroc/bag/bag2).
    """
    basic_desc = "Absolute pose error (APE) metric app"
    lic = "(c) evo authors"
    # Parent parser holding the options common to all sub-commands.
    shared_parser = argparse.ArgumentParser(add_help=False)
    algo_opts = shared_parser.add_argument_group("algorithm options")
    output_opts = shared_parser.add_argument_group("output options")
    usability_opts = shared_parser.add_argument_group("usability options")
    algo_opts.add_argument(
        "-r", "--pose_relation", default="trans_part",
        help="pose relation on which the APE is based", choices=[
            "full", "trans_part", "rot_part", "angle_deg", "angle_rad",
            "point_distance"
        ])
    algo_opts.add_argument("-a", "--align",
                           help="alignment with Umeyama's method (no scale)",
                           action="store_true")
    algo_opts.add_argument("-s", "--correct_scale", action="store_true",
                           help="correct scale with Umeyama's method")
    algo_opts.add_argument(
        "--n_to_align",
        help="the number of poses to use for Umeyama alignment, "
        "counted from the start (default: all)", default=-1, type=int)
    algo_opts.add_argument(
        "--align_origin",
        help="align the trajectory origin to the origin of the reference "
        "trajectory", action="store_true")
    output_opts.add_argument(
        "-p",
        "--plot",
        action="store_true",
        help="show plot window",
    )
    output_opts.add_argument(
        "--plot_mode", default=SETTINGS.plot_mode_default,
        help="the axes for plot projection",
        choices=["xy", "xz", "yx", "yz", "zx", "zy", "xyz"])
    output_opts.add_argument(
        "--plot_x_dimension", choices=["index", "seconds",
                                       "distances"], default="seconds",
        help="dimension that is used on the x-axis of the raw value plot"
        "(default: seconds, or index if no timestamps are present)")
    output_opts.add_argument(
        "--plot_colormap_max", type=float,
        help="the upper bound used for the color map plot "
        "(default: maximum error value)")
    output_opts.add_argument(
        "--plot_colormap_min", type=float,
        help="the lower bound used for the color map plot "
        "(default: minimum error value)")
    output_opts.add_argument(
        "--plot_colormap_max_percentile", type=float,
        help="percentile of the error distribution to be used "
        "as the upper bound of the color map plot "
        "(in %%, overrides --plot_colormap_max)")
    output_opts.add_argument(
        "--plot_full_ref",
        action="store_true",
        help="plot the full, unsynchronized reference trajectory",
    )
    output_opts.add_argument(
        "--ros_map_yaml", help="yaml file of an ROS 2D map image (.pgm/.png)"
        " that will be drawn into the plot", default=None)
    output_opts.add_argument("--save_plot", default=None,
                             help="path to save plot")
    output_opts.add_argument("--serialize_plot", default=None,
                             help="path to serialize plot (experimental)")
    output_opts.add_argument("--save_results",
                             help=".zip file path to store results")
    output_opts.add_argument("--logfile", help="Local logfile path.",
                             default=None)
    usability_opts.add_argument("--no_warnings", action="store_true",
                                help="no warnings requiring user confirmation")
    usability_opts.add_argument("-v", "--verbose", action="store_true",
                                help="verbose output")
    usability_opts.add_argument("--silent", action="store_true",
                                help="don't print any output")
    usability_opts.add_argument(
        "--debug", action="store_true",
        help="verbose output with additional debug info")
    usability_opts.add_argument(
        "-c", "--config",
        help=".json file with parameters (priority over command line args)")
    # Main parser: one sub-command per supported input format.
    main_parser = argparse.ArgumentParser(
        description="{} {}".format(basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    kitti_parser = sub_parsers.add_parser(
        "kitti", parents=[shared_parser],
        description="{} for KITTI pose files - {}".format(basic_desc, lic))
    kitti_parser.add_argument("ref_file",
                              help="reference pose file (ground truth)")
    kitti_parser.add_argument("est_file", help="estimated pose file")
    tum_parser = sub_parsers.add_parser(
        "tum", parents=[shared_parser],
        description="{} for TUM trajectory files - {}".format(basic_desc, lic))
    tum_parser.add_argument("ref_file", help="reference trajectory file")
    tum_parser.add_argument("est_file", help="estimated trajectory file")
    euroc_parser = sub_parsers.add_parser(
        "euroc", parents=[shared_parser],
        description="{} for EuRoC MAV files - {}".format(basic_desc, lic))
    euroc_parser.add_argument(
        "state_gt_csv",
        help="ground truth: <seq>/mav0/state_groundtruth_estimate0/data.csv")
    euroc_parser.add_argument("est_file",
                              help="estimated trajectory file in TUM format")
    bag_parser = sub_parsers.add_parser(
        "bag", parents=[shared_parser],
        description="{} for ROS bag files - {}".format(basic_desc, lic))
    bag_parser.add_argument("bag", help="ROS bag file")
    bag_parser.add_argument("ref_topic", help="reference trajectory topic")
    bag_parser.add_argument("est_topic", help="estimated trajectory topic")
    bag2_parser = sub_parsers.add_parser(
        "bag2", parents=[shared_parser],
        description="{} for ROS2 bag files - {}".format(basic_desc, lic))
    bag2_parser.add_argument("bag", help="ROS2 bag file")
    bag2_parser.add_argument("ref_topic", help="reference trajectory topic")
    bag2_parser.add_argument("est_topic", help="estimated trajectory topic")
    # Add time-sync options to parser of trajectory formats.
    # (KITTI files carry no timestamps, hence kitti_parser is excluded.)
    for trajectory_parser in {
            bag_parser, bag2_parser, euroc_parser, tum_parser
    }:
        trajectory_parser.add_argument(
            "--t_max_diff", type=float, default=0.01,
            help="maximum timestamp difference for data association")
        trajectory_parser.add_argument(
            "--t_offset", type=float, default=0.0,
            help="constant timestamp offset for data association")
        trajectory_parser.add_argument(
            "--t_start", type=float, default=None,
            help="only use data with timestamps "
            "greater or equal this start time")
        trajectory_parser.add_argument(
            "--t_end", type=float, default=None,
            help="only use data with timestamps less or equal this end time")
    return main_parser
def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
        pose_relation: metrics.PoseRelation, align: bool = False,
        correct_scale: bool = False, n_to_align: int = -1,
        align_origin: bool = False, ref_name: str = "reference",
        est_name: str = "estimate") -> Result:
    """
    Calculate the APE metric between two (already associated) trajectories.

    :param traj_ref: the reference trajectory
    :param traj_est: the estimated trajectory
        (NOTE(review): align()/align_origin() appear to transform traj_est
        in place and return the alignment parameters - confirm against the
        PosePath3D API)
    :param pose_relation: the pose relation on which the APE is based
    :param align: use Umeyama alignment (no scale)
    :param correct_scale: additionally correct the scale with Umeyama's method
    :param n_to_align: number of poses used for alignment, -1 means all
    :param align_origin: align only the origin to the reference
    :param ref_name: label of the reference trajectory in the result
    :param est_name: label of the estimated trajectory in the result
    :return: Result object with statistics, trajectories and raw arrays
    """
    # Align the trajectories.
    only_scale = correct_scale and not align
    alignment_transformation = None
    if align or correct_scale:
        logger.debug(SEP)
        alignment_transformation = lie_algebra.sim3(
            *traj_est.align(traj_ref, correct_scale, only_scale, n=n_to_align))
    elif align_origin:
        logger.debug(SEP)
        alignment_transformation = traj_est.align_origin(traj_ref)
    # Calculate APE.
    logger.debug(SEP)
    data = (traj_ref, traj_est)
    ape_metric = metrics.APE(pose_relation)
    ape_metric.process_data(data)
    # The title documents which alignment variant was applied.
    title = str(ape_metric)
    if align and not correct_scale:
        title += "\n(with SE(3) Umeyama alignment)"
    elif align and correct_scale:
        title += "\n(with Sim(3) Umeyama alignment)"
    elif only_scale:
        title += "\n(scale corrected)"
    elif align_origin:
        title += "\n(with origin alignment)"
    else:
        title += "\n(not aligned)"
    if (align or correct_scale) and n_to_align != -1:
        title += " (aligned poses: {})".format(n_to_align)
    ape_result = ape_metric.get_result(ref_name, est_name)
    ape_result.info["title"] = title
    logger.debug(SEP)
    logger.info(ape_result.pretty_str())
    ape_result.add_trajectory(ref_name, traj_ref)
    ape_result.add_trajectory(est_name, traj_est)
    # Attach raw arrays only if timestamps are available
    # (PoseTrajectory3D, not plain PosePath3D).
    if isinstance(traj_est, PoseTrajectory3D):
        seconds_from_start = np.array(
            [t - traj_est.timestamps[0] for t in traj_est.timestamps])
        ape_result.add_np_array("seconds_from_start", seconds_from_start)
        ape_result.add_np_array("timestamps", traj_est.timestamps)
        ape_result.add_np_array("distances_from_start", traj_ref.distances)
        ape_result.add_np_array("distances", traj_est.distances)
    if alignment_transformation is not None:
        ape_result.add_np_array("alignment_transformation_sim3",
                                alignment_transformation)
    return ape_result
def run(args: argparse.Namespace) -> None:
    """
    Main evo_ape workflow: load the trajectories specified by the parsed
    command line arguments, synchronize, run the APE metric and finally
    plot/save the result as requested.

    :param args: parsed arguments of the parser built by parser()
    """
    log.configure_logging(args.verbose, args.silent, args.debug,
                          local_logfile=args.logfile)
    if args.debug:
        from pprint import pformat
        parser_str = pformat({arg: getattr(args, arg) for arg in vars(args)})
        logger.debug("main_parser config:\n{}".format(parser_str))
    logger.debug(SEP)
    traj_ref, traj_est, ref_name, est_name = common.load_trajectories(args)
    # Keep an untouched copy in case the full reference shall be plotted.
    traj_ref_full = None
    if args.plot_full_ref:
        import copy
        traj_ref_full = copy.deepcopy(traj_ref)
    # Time range filtering / association only makes sense with timestamps.
    if isinstance(traj_ref, PoseTrajectory3D) and isinstance(
            traj_est, PoseTrajectory3D):
        logger.debug(SEP)
        if args.t_start or args.t_end:
            if args.t_start:
                logger.info("Using time range start: {}s".format(args.t_start))
            if args.t_end:
                logger.info("Using time range end: {}s".format(args.t_end))
            traj_ref.reduce_to_time_range(args.t_start, args.t_end)
        logger.debug("Synchronizing trajectories...")
        traj_ref, traj_est = sync.associate_trajectories(
            traj_ref, traj_est, args.t_max_diff, args.t_offset,
            first_name=ref_name, snd_name=est_name)
    pose_relation = common.get_pose_relation(args)
    result = ape(
        traj_ref=traj_ref,
        traj_est=traj_est,
        pose_relation=pose_relation,
        align=args.align,
        correct_scale=args.correct_scale,
        n_to_align=args.n_to_align,
        align_origin=args.align_origin,
        ref_name=ref_name,
        est_name=est_name,
    )
    if args.plot or args.save_plot or args.serialize_plot:
        common.plot_result(args, result, traj_ref,
                           result.trajectories[est_name],
                           traj_ref_full=traj_ref_full)
    if args.save_results:
        logger.debug(SEP)
        # Optionally drop the trajectories to keep the .zip file small.
        if not SETTINGS.save_traj_in_zip:
            del result.trajectories[ref_name]
            del result.trajectories[est_name]
        file_interface.save_res_file(args.save_results, result,
                                     confirm_overwrite=not args.no_warnings)
if __name__ == '__main__':
from evo import entry_points
entry_points.ape()
| 12,617 | 39.703226 | 79 | py |
evo | evo-master/evo/main_config.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
simple JSON configuration generator script for executables
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import json
import logging
import math
import os
import sys
import typing

import colorama
from colorama import Style
from pygments import highlight, lexers, formatters

from evo import EvoException
from evo.tools import log, user, settings
from evo.tools.settings_template import DEFAULT_SETTINGS_DICT_DOC
from evo.tools.settings_template import DEFAULT_SETTINGS_DICT
logger = logging.getLogger(__name__)
SEP = "-" * 80
class ConfigError(EvoException):
    """Raised when configuration parameters cannot be applied."""
    pass
def log_info_dict_json(data: dict, colored: bool = True) -> None:
    """Log a dict as sorted, indented JSON via the module logger.

    :param data: dictionary to print
    :param colored: apply JSON syntax highlighting (skipped on Windows)
    """
    serialized = json.dumps(data, indent=4, sort_keys=True)
    if colored and os.name != "nt":
        serialized = highlight(serialized, lexers.JsonLexer(),
                               formatters.Terminal256Formatter(style="monokai"))
    logger.info(serialized)
def show(config_path: str, colored: bool = True) -> None:
    """Load a JSON config file and log its pretty-printed content."""
    with open(config_path) as opened:
        loaded = json.load(opened)
    log_info_dict_json(loaded, colored)
def merge_json_union(first_file: str, second_file: str,
                     soft: bool = False) -> None:
    """Merge the JSON content of second_file into first_file (in place).

    :param soft: if True, keys already present in first_file are kept
    """
    with open(second_file) as other_handle:
        other_config = json.loads(other_handle.read())
    with open(first_file, 'r+') as target_handle:
        merged = settings.merge_dicts(json.loads(target_handle.read()),
                                      other_config, soft)
        # Rewind and truncate before writing the merged configuration back.
        target_handle.seek(0)
        target_handle.truncate(0)
        target_handle.write(json.dumps(merged, indent=4, sort_keys=True))
def is_number(token: str) -> bool:
    """Return True if the token represents a finite number.

    Non-finite spellings accepted by float() such as "nan", "inf" or
    "-inf" are rejected on purpose: they cannot be round-tripped through
    strict JSON, and set_config()'s int-vs-float check would crash on
    them (int(float("nan")) raises ValueError). They are treated as
    plain strings instead.
    """
    try:
        return math.isfinite(float(token))
    except ValueError:
        return False
def finalize_values(config: dict, key: str,
                    values: typing.List[str]) -> typing.Any:
    """
    Turns parsed values into final value(s) for the config at the given key,
    e.g. based on the previous type of that parameter or other constraints.
    """
    if not values:
        return None
    # Special treatment for plot_seaborn_palette is needed, see #359.
    if key == "plot_seaborn_palette":
        if len(values) > 1:
            return values
        from seaborn.palettes import color_palette
        try:
            color_palette(values[0])
        except ValueError:
            return values
        return values[0]
    previous = config[key]
    if isinstance(previous, bool):
        # Explicit "true"/"false" strings win, any other value toggles.
        last = values[-1]
        if isinstance(last, str):
            lowered = last.lower()
            if lowered == "false":
                return False
            if lowered == "true":
                return True
        return not previous
    if isinstance(previous, list):
        head = values[0]
        # "[]" or "none" empties a list parameter.
        if isinstance(head, str) and head.lower() in ("[]", "none"):
            return []
        return values
    return values[0]
def set_config(config_path: str, arg_list: typing.Sequence[str]) -> None:
    """
    Update a JSON config file in place from a flat token list of the form
    [param, value..., param, value..., ...].

    :param config_path: path of the JSON file to modify
    :param arg_list: tokens; parameter names must be existing config keys
    """
    with open(config_path) as config_file:
        config = json.load(config_file)
    max_idx = len(arg_list) - 1
    for i, arg in enumerate(arg_list):
        # Tokens that are not known parameter names are values - they are
        # consumed below together with their preceding parameter.
        if arg not in config.keys():
            continue
        if i + 1 <= max_idx and arg_list[i + 1] not in config.keys():
            # Collect all value tokens until the next parameter name.
            values: typing.List[typing.Any] = []
            for j in range(i + 1, max_idx + 1):
                value = arg_list[j]
                if value in config.keys():
                    break
                if is_number(value):
                    # Keep integral numbers as int, everything else as float.
                    if int(float(value)) - float(value) != 0:
                        values.append(float(value))
                    else:
                        values.append(int(float(value)))
                else:
                    values.append(value)
            config[arg] = finalize_values(config, arg, values)
        else:
            # no argument, toggle if it's a boolean parameter
            config[arg] = not config[arg] if isinstance(config[arg],
                                                        bool) else config[arg]
    with open(config_path, 'w') as config_file:
        config_file.write(json.dumps(config, indent=4, sort_keys=True))
def generate(arg_list: typing.Sequence[str]) -> typing.Dict[str, typing.Any]:
    """
    Convert command line tokens into a dict: flags without values become
    booleans, flags followed by values get those values (numbers parsed,
    a single value is unwrapped from its list).
    """
    data: typing.Dict[str, typing.Any] = {}
    last_index = len(arg_list) - 1
    for position, token in enumerate(arg_list):
        if not token.startswith("-"):
            # Value tokens are consumed below, together with their flag.
            continue
        key = token[2:] if token.startswith("--") else token[1:]
        next_position = position + 1
        if next_position > last_index or arg_list[next_position].startswith(
                "-"):
            data[key] = True  # just a boolean flag
            continue
        parsed: typing.List[typing.Any] = []
        for candidate in arg_list[next_position:]:
            if candidate.startswith("-"):
                break
            parsed.append(
                float(candidate) if is_number(candidate) else candidate)
        data[key] = parsed[0] if len(parsed) == 1 else parsed
    return data
SET_HELP = '''
set parameters
Unless -c / --config is specified, the package settings will be used.
--EXAMPLE--
If your configuration looks like this (via 'evo_config show'):
{
"plot_export_format": "svg"
"plot_info_text": true,
}
running:
evo_config set plot_export_format png plot_info_text
will set it to:
{
"plot_export_format": "png"
"plot_info_text": false,
}
'''
GENERATE_HELP = '''
generate configuration files from command-line args
The configuration files are intended to hold command line
parameters used by the respective executables, e.g. 'evo_ape'.
--EXAMPLE--
Running:
evo_config generate --align --plot --plot_mode xz --verbose
will convert the arguments into the JSON format:
{
"align": true,
"plot": true,
"plot_mode": "xz",
"verbose": true
}
List arguments (--arg 1 2 3) are also supported.
To save the configuration, specify -o / --output.
'''
def main() -> None:
    """Entry point of evo_config: parse the sub-command (show / set /
    generate / reset) and apply it to the package settings or to a user
    supplied config file."""
    import argcomplete
    basic_desc = "crappy configuration tool"
    lic = "(c) evo authors"
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument("--no_color", help="don't color output",
                               action="store_true")
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    show_parser = sub_parsers.add_parser(
        "show", description="show configuration - %s" % lic,
        parents=[shared_parser])
    show_parser.add_argument(
        "config",
        help="optional config file to display (default: package settings)",
        nargs='?')
    show_parser.add_argument("--brief", help="show only the .json data",
                             action="store_true")
    set_parser = sub_parsers.add_parser(
        "set", description=SET_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    set_parser.add_argument("params",
                            choices=list(DEFAULT_SETTINGS_DICT.keys()),
                            nargs=argparse.REMAINDER, help="parameters to set")
    set_parser.add_argument(
        "-c", "--config",
        help="optional config file (default: package settings)", default=None)
    set_parser.add_argument("-m", "--merge",
                            help="other config file to merge in (priority)",
                            default=None)
    set_parser.add_argument("--soft", help="do a soft-merge (no overwriting)",
                            action="store_true")
    gen_parser = sub_parsers.add_parser(
        "generate", description=GENERATE_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o", "--out",
                            help="path for config file to generate")
    reset_parser = sub_parsers.add_parser(
        "reset", description="reset package settings - %s" % lic,
        parents=[shared_parser])
    reset_parser.add_argument("-y", help="acknowledge automatically",
                              action="store_true")
    reset_parser.add_argument("params",
                              choices=list(DEFAULT_SETTINGS_DICT.keys()),
                              nargs=argparse.REMAINDER,
                              help="parameters to reset")
    argcomplete.autocomplete(main_parser)
    args, other_args = main_parser.parse_known_args()
    if len(sys.argv) > 1 and sys.argv[1] == "set":
        # Take the raw tokens after "set" verbatim - argparse would
        # otherwise swallow or reorder the free-form key/value list.
        other_args = [arg for arg in sys.argv[2:]]
    log.configure_logging()
    colorama.init()
    # Default target is the package settings file, unless -c/--config given.
    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config
    if args.subcommand == "show":
        if not args.brief and not args.config:
            style = Style.BRIGHT if not args.no_color else Style.NORMAL
            doc_str = "\n".join(
                "{0}{1}{2}:\n{3}\n".format(style, k, Style.RESET_ALL, v[1])
                for k, v in sorted(DEFAULT_SETTINGS_DICT_DOC.items()))
            logger.info(doc_str)
            logger.info("{0}\n{1}\n{0}".format(SEP, config))
        show(config, colored=not args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logger.info(SEP + "\nSee text above for parameter descriptions.")
    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logger.error("No permission to modify " + config)
            sys.exit(1)
        if other_args or args.merge:
            logger.info("{0}\nOld configuration:\n{0}".format(SEP))
            show(config, colored=not args.no_color)
            try:
                set_config(config, other_args)
            except ConfigError as e:
                logger.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge, args.soft)
            logger.info(SEP + "\nNew configuration:\n" + SEP)
            show(config, colored=not args.no_color)
        else:
            logger.error("No configuration parameters given (see --help).")
    elif args.subcommand == "generate":
        if other_args:
            logger.info(
                "{0}\nParsed by argparse:\n{1}\n"
                "{0}\nWARNING:\nMake sure you use the 'long-style' -- options "
                "(e.g. --plot) if possible\nand no combined short '-' flags, "
                "(e.g. -vp)\n{0}".format(SEP, other_args))
            data = generate(other_args)
            log_info_dict_json(data, colored=not args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logger.warning("\n(-o | --out) not specified - saving nothing")
        else:
            logger.error("No command line arguments given (see --help)")
    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            # Message fixed: there was a missing space before the path.
            logger.error("No permission to modify " + config)
            sys.exit(1)
        if args.params:
            settings.reset(settings.DEFAULT_PATH, parameter_subset=args.params)
        elif args.y or user.confirm(
                "Reset all package settings to the default settings? (y/n)"):
            settings.reset()
        else:
            sys.exit()
        logger.info("{0}\nPackage settings after reset:\n{0}".format(SEP))
        show(settings.DEFAULT_PATH, colored=not args.no_color)
if __name__ == '__main__':
main()
| 12,425 | 34.706897 | 79 | py |
evo | evo-master/evo/main_evo.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
main package executable
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from evo import PACKAGE_BASE_PATH, __version__
from evo.tools import settings
DESC = '''
(c) evo authors - license: run 'evo pkg --license'
More docs are available at: github.com/MichaelGrupp/evo/wiki
Python package for the evaluation of odometry and SLAM
Supported trajectory formats:
* TUM trajectory files
* KITTI pose files
* ROS and ROS2 bagfile with geometry_msgs/PoseStamped,
geometry_msgs/TransformStamped, geometry_msgs/PoseWithCovarianceStamped,
nav_msgs/Odometry topics or TF messages
* EuRoC MAV dataset groundtruth files
The following executables are available:
Metrics:
evo_ape - absolute pose error
evo_rpe - relative pose error
Tools:
evo_traj - tool for analyzing, plotting or exporting multiple trajectories
evo_res - tool for processing multiple result files from the metrics
evo_ipython - IPython shell with pre-loaded evo modules
evo_fig - (experimental) tool for re-opening serialized plots
evo_config - tool for global settings and config file manipulation
'''
def main() -> None:
    """Entry point of the 'evo' command: show package info ("pkg") or pipe
    messages into / print the global evo logfile ("cat_log")."""
    import sys
    import argparse
    import argcomplete
    main_parser = argparse.ArgumentParser()
    shared_parser = argparse.ArgumentParser(add_help=False)
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    pkg_parser = sub_parsers.add_parser(
        "pkg", description="show infos of the package",
        parents=[shared_parser])
    pkg_parser.add_argument("--info", help="show the package description",
                            action="store_true")
    pkg_parser.add_argument("--version", help="print the package version",
                            action="store_true")
    pkg_parser.add_argument("--pyversion", help="print the Python version",
                            action="store_true")
    pkg_parser.add_argument("--license", help="print the package license",
                            action="store_true")
    pkg_parser.add_argument("--location", help="print the package path",
                            action="store_true")
    pkg_parser.add_argument("--logfile", help="print the logfile path",
                            action="store_true")
    pkg_parser.add_argument("--open_log", help="open the package logfile",
                            action="store_true")
    pkg_parser.add_argument("--clear_log", help="clear package logfile",
                            action="store_true")
    cat_parser = sub_parsers.add_parser(
        "cat_log", description="pipe stdin to global evo logfile"
        " or print logfile to stdout (if no stdin)", parents=[shared_parser])
    cat_parser.add_argument("-l", "--loglevel", help="loglevel of the message",
                            default="info",
                            choices=["error", "warning", "info", "debug"])
    cat_parser.add_argument("-m", "--message",
                            help="explicit message instead of pipe")
    cat_parser.add_argument("-s", "--source",
                            help="source name to use for the log message")
    cat_parser.add_argument("--clear_log", help="clear logfile before exiting",
                            action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv[1:]) == 0:
        sys.argv.extend(["pkg", "--info"])  # cheap trick because YOLO
    args = main_parser.parse_args()
    # Only append a newline when printing to an interactive terminal.
    line_end = "\n" if sys.stdout.isatty() else ""
    if args.subcommand == "pkg":
        if not len(sys.argv) > 2:
            pkg_parser.print_help()
            sys.exit(1)
        if args.license:
            # Context manager instead of a bare open(): no leaked handle.
            with open(os.path.join(PACKAGE_BASE_PATH, "LICENSE")) as f:
                print(f.read())
        if args.info:
            main_parser.print_usage()
            print(DESC)
        if args.version:
            print(__version__, end=line_end)
        if args.pyversion:
            import platform as pf
            print(pf.python_version(), end=line_end)
        if args.location:
            print(PACKAGE_BASE_PATH, end=line_end)
        if args.logfile or args.open_log:
            print(settings.GLOBAL_LOGFILE_PATH, end=line_end)
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled", end=line_end)
                sys.exit(1)
            if args.open_log:
                import webbrowser
                webbrowser.open(settings.GLOBAL_LOGFILE_PATH)
        if args.clear_log:
            from evo.tools import user
            if user.confirm("clear logfile? (y/n)"):
                # Opening in "w" mode truncates the file; close immediately.
                open(settings.GLOBAL_LOGFILE_PATH, mode='w').close()
    elif args.subcommand == "cat_log":
        if os.name == "nt":
            print("cat_log feature not available on Windows")
            sys.exit(1)
        if not args.message and sys.stdin.isatty():
            # No piped input and no explicit message: print the logfile.
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled", end=line_end)
            else:
                with open(settings.GLOBAL_LOGFILE_PATH) as f:
                    print(f.read(), end="")
        elif not settings.SETTINGS.global_logfile_enabled:
            print("logfile disabled", end=line_end)
            sys.exit(1)
        else:
            import logging
            logger = logging.getLogger(__name__)
            from evo.tools import log
            file_fmt = log.DEFAULT_LONG_FMT
            if args.source:
                file_fmt = file_fmt.replace(
                    "%(module)s.%(funcName)s():%(lineno)s", args.source)
            log.configure_logging(silent=True, file_fmt=file_fmt)
            if not args.message:
                msg = sys.stdin.read()
            else:
                msg = args.message
            getattr(logger, args.loglevel)(msg)
        if args.clear_log:
            open(settings.GLOBAL_LOGFILE_PATH, mode='w').close()
if __name__ == '__main__':
main()
| 6,718 | 38.994048 | 79 | py |
evo | evo-master/evo/main_fig.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
plot editor
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import logging
logger = logging.getLogger(__name__)
SEP = "-" * 80 # separator line
def main() -> None:
    """Entry point of evo_fig: deserialize a PlotCollection file, display
    it and optionally export, re-serialize or convert it to HTML."""
    import argparse
    import argcomplete
    basic_desc = "experimental tool for opening a serialized PlotCollection"
    lic = "(c) evo authors"
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    main_parser.add_argument("in_file",
                             help="path to a serialized plot_collection")
    main_parser.add_argument("-t", "--title",
                             help="custom title (default: file name)")
    main_parser.add_argument("--save_plot", help="path to save plot",
                             default=None)
    main_parser.add_argument("--serialize_plot",
                             help="path to re-serialize PlotCollection",
                             default=None)
    main_parser.add_argument("--to_html",
                             help="convert to html (requires mpld3 library)",
                             action="store_true")
    main_parser.add_argument("--no_warnings",
                             help="no warnings requiring user confirmation",
                             action="store_true")
    argcomplete.autocomplete(main_parser)
    args = main_parser.parse_args()
    from evo.tools import log, plot, user
    log.configure_logging(verbose=True)
    if not args.title:
        title = os.path.basename(args.in_file)
    else:
        title = args.title
    if not args.no_warnings:
        # Typo fixed: "guranteed" -> "guaranteed".
        logger.warning(
            "This tool is experimental and not guaranteed to work.\nOnly "
            "works if the same plot settings are used as for serialization.\n"
            "If not, try: evo_config show/set \n" + SEP)
    plot_collection = plot.PlotCollection(title, deserialize=args.in_file)
    logger.debug("Deserialized PlotCollection: " + str(plot_collection))
    plot_collection.show()
    if args.serialize_plot:
        logger.debug(SEP)
        plot_collection.serialize(args.serialize_plot,
                                  confirm_overwrite=not args.no_warnings)
    if args.save_plot:
        logger.debug(SEP)
        plot_collection.export(args.save_plot,
                               confirm_overwrite=not args.no_warnings)
    if args.to_html:
        import mpld3
        logger.debug(SEP + "\nhtml export\n")
        # One .html file per figure, named after the figure.
        for name, fig in plot_collection.figures.items():
            html = mpld3.fig_to_html(fig)
            out = name + ".html"
            with open(out, 'w') as f:
                logger.debug(out)
                f.write(html)
    if not args.no_warnings:
        logger.debug(SEP)
        if user.confirm("Save changes & overwrite original file " +
                        args.in_file + "? (y/n)"):
            plot_collection.serialize(args.in_file, confirm_overwrite=False)
if __name__ == '__main__':
main()
| 3,672 | 35.73 | 79 | py |
evo | evo-master/evo/main_ipython.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
launch a custom IPython shell for evo
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import os
import shutil
import subprocess as sp
import sys
from evo import PACKAGE_BASE_PATH
DESC = '''
Launches an IPython shell with pre-loaded evo modules
(c) evo authors
Unknown command line arguments are forwarded to the ipython executable
'''
def main() -> None:
    """Locate IPython, make sure the evo profile exists and launch an
    IPython shell with that profile. Unknown command line arguments are
    forwarded to the IPython executable."""
    main_parser = argparse.ArgumentParser(
        description=DESC, formatter_class=argparse.RawTextHelpFormatter)
    args, other_args = main_parser.parse_known_args()
    other_args = [] if other_args is None else other_args
    # check if IPython is installed properly
    ipython = "ipython3"
    if shutil.which(ipython) is None:
        # fall back to the non-explicit ipython name if ipython3 is not in PATH
        ipython = "ipython"
        if shutil.which(ipython) is None:
            print("IPython is not installed", file=sys.stderr)
            sys.exit(1)
    # Strip the leading "i" to get the matching python interpreter name
    # ("ipython3" -> "python3").
    python = ipython[1:]
    try:
        # subprocess.DEVNULL instead of open(os.devnull): no leaked handle.
        sp.check_call([ipython, "profile", "locate", "evo"],
                      stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    except sp.CalledProcessError:
        print("IPython profile for evo is not installed", file=sys.stderr)
        sp.call([ipython, "profile", "create", "evo"])
        config = os.path.join(PACKAGE_BASE_PATH, "ipython_config.py")
        profile_dir = sp.check_output([ipython, "profile", "locate",
                                       "evo"]).decode("utf-8")
        profile_dir = profile_dir.rstrip()
        shutil.copy(config, os.path.join(profile_dir, "ipython_config.py"))
    try:
        sp.check_call([python, "-m", "IPython", "--profile", "evo"] +
                      other_args)
    except sp.CalledProcessError as e:
        print("IPython error", e.output, file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
main()
| 2,581 | 31.683544 | 79 | py |
evo | evo-master/evo/main_res.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
main executable for viewing result files from the trajectory metric apps
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import sys
import typing
import pandas as pd
from evo.core.result import merge_results, Result
from evo.tools import file_interface, log, user, pandas_bridge
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
SEP = "-" * 80 # separator line
CONFLICT_TEMPLATE = """
Mismatching titles - risk of aggregating data from different metrics. Conflict:
<<<<<<< {0}
{1}
=======
{2}
>>>>>>> {3}
Only the first one will be used as the title!"""
def parser() -> argparse.ArgumentParser:
    """Build the command line interface of the result processing tool.

    :return: argument parser with all options of this app registered
    """
    basic_desc = "tool for processing one or multiple result files"
    lic = "(c) evo authors"
    arg_parser = argparse.ArgumentParser(
        description="{} {}".format(basic_desc, lic))
    output_group = arg_parser.add_argument_group("output options")
    usability_group = arg_parser.add_argument_group("usability options")
    # Positional input files.
    arg_parser.add_argument("result_files", nargs='+',
                            help="one or multiple result files")
    # General processing options.
    arg_parser.add_argument("--merge", action="store_true",
                            help="merge the results into a single one")
    arg_parser.add_argument("--use_rel_time", action="store_true",
                            help="use relative timestamps if available")
    arg_parser.add_argument("--use_filenames", action="store_true",
                            help="use the filenames to label the data")
    arg_parser.add_argument("--ignore_title", action="store_true",
                            help="don't try to find a common metric title")
    # Output options.
    output_group.add_argument("-p", "--plot", action="store_true",
                              help="show plot window")
    output_group.add_argument("--plot_markers", action="store_true",
                              help="plot with circle markers")
    output_group.add_argument("--save_plot", default=None,
                              help="path to save plot")
    output_group.add_argument("--serialize_plot", default=None,
                              help="path to serialize plot (experimental)")
    output_group.add_argument(
        "--save_table", default=None,
        help="path to a file to save the results in a table")
    output_group.add_argument("--logfile", default=None,
                              help="Local logfile path.")
    # Usability options.
    usability_group.add_argument(
        "--no_warnings", action="store_true",
        help="no warnings requiring user confirmation")
    usability_group.add_argument("-v", "--verbose", action="store_true",
                                 help="verbose output")
    usability_group.add_argument("--silent", action="store_true",
                                 help="don't print any output")
    usability_group.add_argument(
        "--debug", action="store_true",
        help="verbose output with additional debug info")
    usability_group.add_argument(
        "-c", "--config",
        help=".json file with parameters (priority over command line args)")
    return arg_parser
def run(args: argparse.Namespace) -> None:
    """Process result files: aggregate, print statistics, export and plot.

    :param args: parsed command line arguments (see parser())
    :raises ValueError: if the table_export_data setting is unsupported
    """
    # Keep pandas' console output compact.
    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20

    log.configure_logging(args.verbose, args.silent, args.debug,
                          local_logfile=args.logfile)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logger.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))

    # One column per result file (or one merged column); rows are grouped
    # into the sections "info", "stats" and "np_arrays".
    df = pandas_bridge.load_results_as_dataframe(args.result_files,
                                                 args.use_filenames,
                                                 args.merge)

    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        # Escape underscores so LaTeX doesn't interpret them as subscripts.
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    # Column names must be unique to aggregate the data unambiguously.
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logger.error("Values of 'est_name' must be unique - duplicates: {}\n"
                     "Try using the --use_filenames option to use filenames "
                     "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)

    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        # Drop absolute timestamps so that a relative index is preferred.
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break

    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logger.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]  # type: ignore
            error_df = pd.concat([error_df, new_error_df], axis=1)

    # check titles
    first_title = df.loc["info", "title"][0] if not args.ignore_title else ""
    first_file = args.result_files[0]
    if not args.no_warnings and not args.ignore_title:
        # Warn and ask for confirmation if the results seem to come from
        # different metrics, i.e. their titles differ from the first one.
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logger.debug(SEP)
                logger.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "You can use --ignore_title to just aggregate data.\n"
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()

    logger.debug(SEP)
    logger.debug("Aggregated dataframe:\n{}".format(
        df.to_string(line_width=80)))

    # show a statistics overview
    logger.debug(SEP)
    if not args.ignore_title:
        logger.info("\n" + first_title + "\n\n")
    logger.info(df.loc["stats"].T.to_string(line_width=80) + "\n")

    if args.save_table:
        logger.debug(SEP)
        if SETTINGS.table_export_data.lower() == "error_array":
            data = error_df
        elif SETTINGS.table_export_data.lower() in ("info", "stats"):
            data = df.loc[SETTINGS.table_export_data.lower()]
        else:
            raise ValueError("unsupported export data specifier: {}".format(
                SETTINGS.table_export_data))
        pandas_bridge.save_df_as_table(data, args.save_table,
                                       confirm_overwrite=not args.no_warnings)

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" and not args.no_warnings:
            logger.debug(SEP)
            logger.warning("Data lengths/indices are not consistent, "
                           "raw value plot might not be correctly aligned")

        # Plotting dependencies are imported lazily - only needed here.
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]

        plot_collection = plot.PlotCollection(first_title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index", limit_area="inside").plot(
            ax=fig_raw.gca(), colormap=colormap, style=linestyles,
            title=first_title, alpha=SETTINGS.plot_trajectory_alpha)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        if SETTINGS.plot_statistics:
            fig_stats = plt.figure(figsize=figsize)
            include = df.loc["stats"].index.isin(SETTINGS.plot_statistics)
            if any(include):
                df.loc["stats"][include].plot(kind="barh", ax=fig_stats.gca(),
                                              colormap=colormap, stacked=False)
                plt.xlabel(metric_label)
                plt.legend(frameon=True)
                plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # TODO: see issue #98
        # NOTE(review): sns.distplot is deprecated in recent seaborn releases
        # (replaced by displot/histplot) - the filter below hides the warning.
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logger.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logger.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
# Script entry point: delegate to the shared entry point wrapper, which also
# handles configuration loading and error reporting.
if __name__ == '__main__':
    from evo import entry_points
    entry_points.res()
| 12,543 | 40.953177 | 100 | py |
evo | evo-master/evo/main_rpe.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
Main executable for calculating the relative pose error (RPE) metric.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import numpy as np
import evo.common_ape_rpe as common
from evo.core import lie_algebra, sync, metrics
from evo.core.result import Result
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
from evo.tools import file_interface, log
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
SEP = "-" * 80 # separator line
def parser() -> argparse.ArgumentParser:
    """Build the command line interface of the RPE metric app.

    :return: top-level argument parser including all sub-commands
    """
    basic_desc = "Relative pose error (RPE) metric app"
    lic = "(c) evo authors"

    # Options shared by all sub-commands (one sub-command per input format).
    common_parser = argparse.ArgumentParser(add_help=False)
    algo_group = common_parser.add_argument_group("algorithm options")
    output_group = common_parser.add_argument_group("output options")
    usability_group = common_parser.add_argument_group("usability options")

    algo_group.add_argument(
        "-r", "--pose_relation", default="trans_part",
        choices=[
            "full", "trans_part", "rot_part", "angle_deg", "angle_rad",
            "point_distance", "point_distance_error_ratio"
        ], help="pose relation on which the RPE is based")
    algo_group.add_argument("-a", "--align", action="store_true",
                            help="alignment with Umeyama's method (no scale)")
    algo_group.add_argument("-s", "--correct_scale", action="store_true",
                            help="correct scale with Umeyama's method")
    algo_group.add_argument(
        "--n_to_align", default=-1, type=int,
        help="the number of poses to use for Umeyama alignment, "
        "counted from the start (default: all)")
    algo_group.add_argument(
        "--align_origin", action="store_true",
        help="align the trajectory origin to the origin of the reference "
        "trajectory")
    algo_group.add_argument("-d", "--delta", type=float, default=1,
                            help="delta between relative poses")
    algo_group.add_argument("-t", "--delta_tol", type=float, default=0.1,
                            help="relative delta tolerance for all_pairs mode")
    algo_group.add_argument(
        "-u", "--delta_unit", default="f", choices=['f', 'd', 'r', 'm'],
        help="unit of delta - `f` (frames), `d` (deg), `r` (rad), `m`(meters)")
    algo_group.add_argument(
        "--all_pairs", action="store_true",
        help="use all pairs instead of consecutive pairs")
    algo_group.add_argument(
        "--pairs_from_reference", action="store_true",
        help="determine the pose pairs from the reference trajectory")

    output_group.add_argument("-p", "--plot", action="store_true",
                              help="show plot window")
    output_group.add_argument(
        "--plot_mode", default=SETTINGS.plot_mode_default,
        choices=["xy", "xz", "yx", "yz", "zx", "zy", "xyz"],
        help="the axes for plot projection")
    output_group.add_argument(
        "--plot_x_dimension", default="seconds",
        choices=["index", "seconds", "distances"],
        help="dimension that is used on the x-axis of the raw value plot"
        "(default: seconds, or index if no timestamps are present)")
    output_group.add_argument(
        "--plot_colormap_max", type=float,
        help="the upper bound used for the color map plot "
        "(default: maximum error value)")
    output_group.add_argument(
        "--plot_colormap_min", type=float,
        help="the lower bound used for the color map plot "
        "(default: minimum error value)")
    output_group.add_argument(
        "--plot_colormap_max_percentile", type=float,
        help="percentile of the error distribution to be used "
        "as the upper bound of the color map plot "
        "(in %%, overrides --plot_colormap_max)")
    output_group.add_argument(
        "--plot_full_ref", action="store_true",
        help="plot the full, unsynchronized reference trajectory")
    output_group.add_argument(
        "--ros_map_yaml", default=None,
        help="yaml file of an ROS 2D map image (.pgm/.png)"
        " that will be drawn into the plot")
    output_group.add_argument("--save_plot", default=None,
                              help="path to save plot")
    output_group.add_argument("--serialize_plot", default=None,
                              help="path to serialize plot (experimental)")
    output_group.add_argument("--save_results",
                              help=".zip file path to store results")
    output_group.add_argument("--logfile", default=None,
                              help="Local logfile path.")

    usability_group.add_argument(
        "--no_warnings", action="store_true",
        help="no warnings requiring user confirmation")
    usability_group.add_argument("-v", "--verbose", action="store_true",
                                 help="verbose output")
    usability_group.add_argument("--silent", action="store_true",
                                 help="don't print any output")
    usability_group.add_argument(
        "--debug", action="store_true",
        help="verbose output with additional debug info")
    usability_group.add_argument(
        "-c", "--config",
        help=".json file with parameters (priority over command line args)")

    # Top-level parser with one sub-command per supported input format.
    top_parser = argparse.ArgumentParser(
        description="{} {}".format(basic_desc, lic))
    subcommands = top_parser.add_subparsers(dest="subcommand")
    subcommands.required = True

    kitti_parser = subcommands.add_parser(
        "kitti", parents=[common_parser],
        description="{} for KITTI pose files - {}".format(basic_desc, lic))
    kitti_parser.add_argument("ref_file",
                              help="reference pose file (ground truth)")
    kitti_parser.add_argument("est_file", help="estimated pose file")

    tum_parser = subcommands.add_parser(
        "tum", parents=[common_parser],
        description="{} for TUM trajectory files - {}".format(basic_desc, lic))
    tum_parser.add_argument("ref_file", help="reference trajectory file")
    tum_parser.add_argument("est_file", help="estimated trajectory file")

    euroc_parser = subcommands.add_parser(
        "euroc", parents=[common_parser],
        description="{} for EuRoC MAV files - {}".format(basic_desc, lic))
    euroc_parser.add_argument(
        "state_gt_csv",
        help="ground truth: <seq>/mav0/state_groundtruth_estimate0/data.csv")
    euroc_parser.add_argument("est_file",
                              help="estimated trajectory file in TUM format")

    bag_parser = subcommands.add_parser(
        "bag", parents=[common_parser],
        description="{} for ROS bag files - {}".format(basic_desc, lic))
    bag_parser.add_argument("bag", help="ROS bag file")
    bag_parser.add_argument("ref_topic", help="reference trajectory topic")
    bag_parser.add_argument("est_topic", help="estimated trajectory topic")

    bag2_parser = subcommands.add_parser(
        "bag2", parents=[common_parser],
        description="{} for ROS2 bag files - {}".format(basic_desc, lic))
    bag2_parser.add_argument("bag", help="ROS2 bag file")
    bag2_parser.add_argument("ref_topic", help="reference trajectory topic")
    bag2_parser.add_argument("est_topic", help="estimated trajectory topic")

    # Timestamped input formats additionally get time-sync options.
    for traj_format_parser in {
            bag_parser, bag2_parser, euroc_parser, tum_parser
    }:
        traj_format_parser.add_argument(
            "--t_max_diff", type=float, default=0.01,
            help="maximum timestamp difference for data association")
        traj_format_parser.add_argument(
            "--t_offset", type=float, default=0.0,
            help="constant timestamp offset for data association")
        traj_format_parser.add_argument(
            "--t_start", type=float, default=None,
            help="only use data with timestamps "
            "greater or equal this start time")
        traj_format_parser.add_argument(
            "--t_end", type=float, default=None,
            help="only use data with timestamps less or equal this end time")
    return top_parser
def rpe(traj_ref: PosePath3D, traj_est: PosePath3D,
        pose_relation: metrics.PoseRelation, delta: float,
        delta_unit: metrics.Unit, rel_delta_tol: float = 0.1,
        all_pairs: bool = False, pairs_from_reference: bool = False,
        align: bool = False, correct_scale: bool = False, n_to_align: int = -1,
        align_origin: bool = False, ref_name: str = "reference",
        est_name: str = "estimate", support_loop: bool = False) -> Result:
    """Calculate the relative pose error (RPE) between two trajectories.

    :param traj_ref: reference trajectory
    :param traj_est: estimated trajectory
        (presumably aligned in-place by align()/align_origin() - the
        trajectories attached to the result reflect the alignment)
    :param pose_relation: pose relation on which the RPE is based
    :param delta: step between the pose pairs
    :param delta_unit: unit of delta
    :param rel_delta_tol: relative delta tolerance for all-pairs mode
    :param all_pairs: use all possible pairs instead of consecutive ones
    :param pairs_from_reference: derive the pairs from the reference
    :param align: apply Umeyama alignment (no scale)
    :param correct_scale: correct the scale with Umeyama's method
    :param n_to_align: number of poses used for alignment (-1 = all)
    :param align_origin: align only the origin to the reference
    :param ref_name: label of the reference in the result
    :param est_name: label of the estimate in the result
    :param support_loop: deep-copy the input trajectories so that repeated
        calls (e.g. in a Jupyter notebook) don't modify the caller's objects
    :return: result object with statistics, error arrays and trajectories
    """
    # Align the trajectories.
    only_scale = correct_scale and not align
    alignment_transformation = None
    if align or correct_scale:
        logger.debug(SEP)
        # Store the computed Sim(3) transformation for the result file.
        alignment_transformation = lie_algebra.sim3(
            *traj_est.align(traj_ref, correct_scale, only_scale, n=n_to_align))
    elif align_origin:
        logger.debug(SEP)
        alignment_transformation = traj_est.align_origin(traj_ref)

    # Calculate RPE.
    logger.debug(SEP)
    data = (traj_ref, traj_est)
    rpe_metric = metrics.RPE(pose_relation, delta, delta_unit, rel_delta_tol,
                             all_pairs, pairs_from_reference)
    rpe_metric.process_data(data)

    # Build a human-readable title describing the applied alignment.
    title = str(rpe_metric)
    if align and not correct_scale:
        title += "\n(with SE(3) Umeyama alignment)"
    elif align and correct_scale:
        title += "\n(with Sim(3) Umeyama alignment)"
    elif only_scale:
        title += "\n(scale corrected)"
    elif align_origin:
        title += "\n(with origin alignment)"
    else:
        title += "\n(not aligned)"
    if (align or correct_scale) and n_to_align != -1:
        title += " (aligned poses: {})".format(n_to_align)

    rpe_result = rpe_metric.get_result(ref_name, est_name)
    rpe_result.info["title"] = title
    logger.debug(SEP)
    logger.info(rpe_result.pretty_str())

    # Restrict trajectories to delta ids for further processing steps.
    if support_loop:
        # Avoid overwriting if called repeatedly e.g. in Jupyter notebook.
        import copy
        traj_ref = copy.deepcopy(traj_ref)
        traj_est = copy.deepcopy(traj_est)
    # Note: the pose at index 0 is added for plotting purposes, although it has
    # no RPE value assigned to it since it has no previous pose.
    # (for each pair (i, j), the 'delta_ids' represent only j)
    delta_ids_with_first_pose = [0] + rpe_metric.delta_ids
    traj_ref.reduce_to_ids(delta_ids_with_first_pose)
    traj_est.reduce_to_ids(delta_ids_with_first_pose)
    rpe_result.add_trajectory(ref_name, traj_ref)
    rpe_result.add_trajectory(est_name, traj_est)

    if isinstance(traj_est, PoseTrajectory3D):
        seconds_from_start = np.array(
            [t - traj_est.timestamps[0] for t in traj_est.timestamps])
        # Save times/distances of each calculated value.
        # Note: here the first index needs that was added before needs to be
        # ignored again as it's not relevant for the values (see above).
        rpe_result.add_np_array("seconds_from_start", seconds_from_start[1:])
        rpe_result.add_np_array("timestamps", traj_est.timestamps[1:])
        rpe_result.add_np_array("distances_from_start", traj_ref.distances[1:])
        rpe_result.add_np_array("distances", traj_est.distances[1:])

    if alignment_transformation is not None:
        rpe_result.add_np_array("alignment_transformation_sim3",
                                alignment_transformation)
    return rpe_result
def run(args: argparse.Namespace) -> None:
    """Run the RPE pipeline with the given command line arguments.

    Loads the trajectories, optionally restricts the time range and
    synchronizes them, computes the RPE and handles plotting/export.

    :param args: parsed command line arguments (see parser())
    """
    log.configure_logging(args.verbose, args.silent, args.debug,
                          local_logfile=args.logfile)
    if args.debug:
        from pprint import pformat
        parser_str = pformat({arg: getattr(args, arg) for arg in vars(args)})
        logger.debug("main_parser config:\n{}".format(parser_str))
        logger.debug(SEP)

    traj_ref, traj_est, ref_name, est_name = common.load_trajectories(args)
    pose_relation = common.get_pose_relation(args)
    delta_unit = common.get_delta_unit(args)

    traj_ref_full = None
    if args.plot_full_ref:
        # Keep an untouched copy for plotting before sync modifies traj_ref.
        import copy
        traj_ref_full = copy.deepcopy(traj_ref)

    if isinstance(traj_ref, PoseTrajectory3D) and isinstance(
            traj_est, PoseTrajectory3D):
        # Both trajectories have timestamps: apply the time range options
        # and associate the poses before computing the metric.
        logger.debug(SEP)
        if args.t_start or args.t_end:
            if args.t_start:
                logger.info("Using time range start: {}s".format(args.t_start))
            if args.t_end:
                logger.info("Using time range end: {}s".format(args.t_end))
            traj_ref.reduce_to_time_range(args.t_start, args.t_end)
        logger.debug("Synchronizing trajectories...")
        traj_ref, traj_est = sync.associate_trajectories(
            traj_ref, traj_est, args.t_max_diff, args.t_offset,
            first_name=ref_name, snd_name=est_name)

    result = rpe(
        traj_ref=traj_ref,
        traj_est=traj_est,
        pose_relation=pose_relation,
        delta=args.delta,
        delta_unit=delta_unit,
        rel_delta_tol=args.delta_tol,
        all_pairs=args.all_pairs,
        pairs_from_reference=args.pairs_from_reference,
        align=args.align,
        correct_scale=args.correct_scale,
        n_to_align=args.n_to_align,
        align_origin=args.align_origin,
        ref_name=ref_name,
        est_name=est_name,
    )

    if args.plot or args.save_plot or args.serialize_plot:
        common.plot_result(args, result, traj_ref,
                           result.trajectories[est_name],
                           traj_ref_full=traj_ref_full)

    if args.save_results:
        logger.debug(SEP)
        if not SETTINGS.save_traj_in_zip:
            # Keep the result file small by dropping the trajectories.
            del result.trajectories[ref_name]
            del result.trajectories[est_name]
        file_interface.save_res_file(args.save_results, result,
                                     confirm_overwrite=not args.no_warnings)
# Script entry point: delegate to the shared entry point wrapper, which also
# handles configuration loading and error reporting.
if __name__ == '__main__':
    from evo import entry_points
    entry_points.rpe()
| 14,788 | 41.254286 | 79 | py |
evo | evo-master/evo/main_traj.py | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
main executable for trajectory analysis
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import datetime
import logging
import os
from natsort import natsorted
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
SEP = "-" * 80
def parser() -> argparse.ArgumentParser:
    """Build the command line interface of the trajectory tool.

    :return: top-level argument parser including all sub-commands
    """
    basic_desc = "trajectory analysis and manipulation tool"
    lic = "(c) evo authors"

    # Options shared by all sub-commands (one sub-command per input format).
    common_parser = argparse.ArgumentParser(add_help=False)
    algo_group = common_parser.add_argument_group("algorithm options")
    output_group = common_parser.add_argument_group("output options")
    usability_group = common_parser.add_argument_group("usability options")
    common_parser.add_argument("-f", "--full_check", action="store_true",
                               help="run all checks and print all stats")

    algo_group.add_argument("-a", "--align", action="store_true",
                            help="alignment with Umeyama's method (no scale)"
                            " - requires --ref")
    algo_group.add_argument("-s", "--correct_scale", action="store_true",
                            help="scale correction with Umeyama's method"
                            " - requires --ref")
    algo_group.add_argument(
        "--n_to_align", default=-1, type=int,
        help="the number of poses to use for Umeyama alignment, "
        "counted from the start (default: all)")
    algo_group.add_argument(
        "--align_origin", action="store_true",
        help="align the trajectory origin to the origin of the reference "
        "trajectory")
    algo_group.add_argument(
        "--sync", action="store_true",
        help="associate trajectories via matching timestamps - requires --ref")
    algo_group.add_argument(
        "--transform_left", help="path to a .json file with a transformation"
        " to apply to the trajectories (left multiplicative)")
    algo_group.add_argument(
        "--transform_right", help="path to a .json file with a transformation"
        " to apply to the trajectories (right_multiplicative)")
    algo_group.add_argument(
        "--propagate_transform", action="store_true",
        help="with --transform_right: transform each "
        "pose and propagate resulting drift to the next.")
    algo_group.add_argument("--invert_transform", action="store_true",
                            help="invert the transformation of the .json file")
    algo_group.add_argument(
        "--ref", help="trajectory that will be marked/used as the reference")
    algo_group.add_argument(
        "--t_offset", default=0.0, type=float,
        help="add a constant timestamp offset (not adding to --ref trajectory)")
    algo_group.add_argument(
        "--t_max_diff", default=0.01, type=float,
        help="maximum timestamp difference for data association")
    algo_group.add_argument(
        "--merge", action="store_true",
        help="merge the trajectories in a single trajectory")

    output_group.add_argument("-p", "--plot", action="store_true",
                              help="show plot window")
    output_group.add_argument(
        "--plot_relative_time", action="store_true",
        help="show timestamps relative to the start of the reference")
    output_group.add_argument(
        "--plot_mode", default=SETTINGS.plot_mode_default,
        choices=["xy", "xz", "yx", "yz", "zx", "zy", "xyz"],
        help="the axes for plot projection")
    output_group.add_argument(
        "--ros_map_yaml", default=None,
        help="yaml file of an ROS 2D map image (.pgm/.png)"
        " that will be drawn into the plot")
    output_group.add_argument("--save_plot", default=None,
                              help="path to save plot")
    output_group.add_argument("--save_table", default=None,
                              help="path to save table with statistics")
    output_group.add_argument("--serialize_plot", default=None,
                              help="path to serialize plot (experimental)")
    output_group.add_argument(
        "--save_as_tum", action="store_true",
        help="save trajectories in TUM format (as *.tum)")
    output_group.add_argument(
        "--save_as_kitti", action="store_true",
        help="save poses in KITTI format (as *.kitti)")
    output_group.add_argument(
        "--save_as_bag", action="store_true",
        help="save trajectories in ROS bag as <date>.bag")
    output_group.add_argument(
        "--save_as_bag2", action="store_true",
        help="save trajectories in ROS2 bag as <date>")
    output_group.add_argument("--logfile", default=None,
                              help="Local logfile path.")

    usability_group.add_argument(
        "--no_warnings", action="store_true",
        help="no warnings requiring user confirmation")
    usability_group.add_argument("-v", "--verbose", action="store_true",
                                 help="verbose output")
    usability_group.add_argument(
        "--show_full_names", action="store_true",
        help="don't shorten input file paths when "
        "displaying trajectory names")
    usability_group.add_argument("--silent", action="store_true",
                                 help="don't print any output")
    usability_group.add_argument(
        "--debug", action="store_true",
        help="verbose output with additional debug info")
    usability_group.add_argument(
        "-c", "--config",
        help=".json file with parameters (priority over command line args)")

    # Top-level parser with one sub-command per supported input format.
    top_parser = argparse.ArgumentParser(
        description="{} {}".format(basic_desc, lic))
    subcommands = top_parser.add_subparsers(dest="subcommand")
    subcommands.required = True

    kitti_parser = subcommands.add_parser(
        "kitti", parents=[common_parser],
        description="{} for KITTI pose files - {}".format(basic_desc, lic))
    kitti_parser.add_argument("pose_files", nargs='+',
                              help="one or multiple pose files")

    tum_parser = subcommands.add_parser(
        "tum", parents=[common_parser],
        description="{} for TUM trajectory files - {}".format(basic_desc, lic))
    tum_parser.add_argument("traj_files", nargs='+',
                            help="one or multiple trajectory files")

    euroc_parser = subcommands.add_parser(
        "euroc", parents=[common_parser],
        description="{} for EuRoC MAV .csv's - {}".format(basic_desc, lic))
    euroc_parser.add_argument(
        "state_gt_csv", nargs='+',
        help="<sequence>/mav0/state_groundtruth_estimate0/data.csv")

    bag_parser = subcommands.add_parser(
        "bag", parents=[common_parser],
        description="{} for ROS bag files - {}".format(basic_desc, lic))
    bag_parser.add_argument("bag", help="ROS bag file")
    bag_parser.add_argument("topics", nargs='*',
                            help="multiple trajectory topics")
    bag_parser.add_argument("--all_topics", action="store_true",
                            help="use all compatible topics in the bag")

    bag2_parser = subcommands.add_parser(
        "bag2", parents=[common_parser],
        description="{} for ROS2 bag files - {}".format(basic_desc, lic))
    bag2_parser.add_argument("bag", help="ROS2 bag file")
    bag2_parser.add_argument("topics", nargs='*',
                             help="multiple trajectory topics")
    bag2_parser.add_argument("--all_topics", action="store_true",
                             help="use all compatible topics in the bag")
    return top_parser
def die(msg):
    """Log *msg* as an error and abort the program with exit code 1."""
    logger.error(msg)
    import sys
    sys.exit(1)
def load_trajectories(args):
    """Load all input trajectories specified by the command line arguments.

    :param args: parsed arguments of one of the sub-commands
        (tum / kitti / euroc / bag / bag2)
    :return: tuple (trajectories, ref_traj) where trajectories is an
        OrderedDict {name: trajectory} excluding the reference and
        ref_traj is the trajectory given via --ref, or None
    """
    from collections import OrderedDict
    from evo.tools import file_interface
    # Insertion order of the inputs is preserved for display/plotting.
    trajectories = OrderedDict()
    ref_traj = None
    if args.subcommand == "tum":
        for traj_file in args.traj_files:
            if traj_file == args.ref:
                continue
            trajectories[traj_file] = file_interface.read_tum_trajectory_file(
                traj_file)
        if args.ref:
            ref_traj = file_interface.read_tum_trajectory_file(args.ref)
    elif args.subcommand == "kitti":
        for pose_file in args.pose_files:
            if pose_file == args.ref:
                continue
            trajectories[pose_file] = file_interface.read_kitti_poses_file(
                pose_file)
        if args.ref:
            ref_traj = file_interface.read_kitti_poses_file(args.ref)
    elif args.subcommand == "euroc":
        for csv_file in args.state_gt_csv:
            if csv_file == args.ref:
                continue
            else:
                trajectories[
                    csv_file] = file_interface.read_euroc_csv_trajectory(
                        csv_file)
        if args.ref:
            ref_traj = file_interface.read_euroc_csv_trajectory(args.ref)
    elif args.subcommand in ("bag", "bag2"):
        if not (args.topics or args.all_topics):
            die("No topics used - specify topics or set --all_topics.")
        if not os.path.exists(args.bag):
            raise file_interface.FileInterfaceException(
                "File doesn't exist: {}".format(args.bag))
        logger.debug("Opening bag file " + args.bag)
        # Import the matching rosbags reader lazily for the bag format.
        if args.subcommand == "bag2":
            from rosbags.rosbag2 import Reader as Rosbag2Reader
            bag = Rosbag2Reader(args.bag)
        else:
            from rosbags.rosbag1 import Reader as Rosbag1Reader
            bag = Rosbag1Reader(args.bag)
        bag.open()
        try:
            if args.all_topics:
                # Note: args.topics can have TF stuff here, so we add it too.
                topics = args.topics
                topics += natsorted(file_interface.get_supported_topics(bag))
                if args.ref in topics:
                    topics.remove(args.ref)
                if len(topics) == 0:
                    die("No topics of supported types: {}".format(
                        " ".join(file_interface.SUPPORTED_ROS_MSGS)))
            else:
                topics = args.topics
            for topic in topics:
                if topic == args.ref:
                    continue
                trajectories[topic] = file_interface.read_bag_trajectory(
                    bag, topic)
            if args.ref:
                ref_traj = file_interface.read_bag_trajectory(bag, args.ref)
        finally:
            # Always release the bag file, also when reading a topic fails.
            bag.close()
    return trajectories, ref_traj
# TODO refactor
def print_traj_info(name, traj, verbose=False, full_check=False):
    """Log overview information about a single trajectory.

    :param name: display name of the trajectory
    :param traj: the trajectory object to inspect
    :param verbose: print detailed infos and metadata
    :param full_check: additionally run and print all consistency checks
        and statistics (implies the detailed output)
    """
    from evo.core import trajectory
    logger.info(SEP)
    logger.info("name:\t" + name)
    if verbose or full_check:
        def print_dict(name: str, data: dict):
            # Log a dict as an aligned, tab-separated key/value listing.
            string = ""
            for key, value in sorted(data.items()):
                string += "\n\t" + key + "\t" + str(value)
            logger.info(name + ":" + string)

        print_dict("infos", traj.get_infos())
        if traj.meta:
            print_dict("meta", traj.meta)
        if full_check:
            print_dict("checks", traj.check()[1])
            stat_str = ""
            try:
                stats = traj.get_statistics()
                for stat, value in sorted(stats.items()):
                    if isinstance(value, float):
                        stat_str += "\n\t" + stat + "\t" + "{0:.6f}".format(
                            value)
                    else:
                        # Bugfix: format non-float values like the float ones.
                        # Previously the bare value was appended without the
                        # stat name or separators, which garbled the output
                        # and raised TypeError for non-string values.
                        stat_str += "\n\t" + stat + "\t" + str(value)
            except trajectory.TrajectoryException as e:
                stat_str += "\n\terror - " + str(e)
            logger.info("stats:" + stat_str)
    else:
        logger.info("infos:\t" + str(traj))
def to_filestem(name: str, args: argparse.Namespace) -> str:
    """Derive a flat file stem from a topic name or file path.

    For bag/bag2 input the topic name is flattened to a single token;
    for file-based input the base name without extension is returned.
    """
    if args.subcommand not in ("bag", "bag2"):
        return os.path.splitext(os.path.basename(name))[0]
    topic = name[1:] if name.startswith('/') else name
    topic = topic.replace(':', '/')  # TF ID
    return topic.replace('/', '_')
def to_topic_name(name: str, args: argparse.Namespace) -> str:
    """Map a trajectory name to a ROS-style topic name."""
    bag_input = args.subcommand in ("bag", "bag2")
    if bag_input:
        # Expand TF IDs ("parent:child") into a topic-like path.
        return name.replace(':', '/')
    stem = os.path.splitext(os.path.basename(name))[0]
    return '/' + stem.replace(' ', '_')
def to_compact_name(name: str, args: argparse.Namespace,
                    latex_friendly=False) -> str:
    """Shorten a file path to its stem for display, unless disabled."""
    shorten = not args.show_full_names and args.subcommand not in ("bag",
                                                                   "bag2")
    # /some/super/long/path/that/nobody/cares/about/traj.txt -> traj
    compact = os.path.splitext(os.path.basename(name))[0] if shorten else name
    if latex_friendly:
        # Escape underscores so LaTeX doesn't render them as subscripts.
        compact = compact.replace("_", "\\_")
    return compact
def run(args):
    """
    Main workflow of the evo_traj command.
    Loads the trajectories specified by the CLI arguments, optionally
    merges / time-shifts / syncs / aligns / transforms them, prints their
    info and finally plots and/or exports them.
    :param args: argparse.Namespace from the evo_traj argument parser
    """
    import numpy as np
    import evo.core.lie_algebra as lie
    from evo.core import trajectory
    from evo.tools import file_interface, log
    log.configure_logging(verbose=args.verbose, silent=args.silent,
                          debug=args.debug, local_logfile=args.logfile)
    if args.debug:
        import pprint
        logger.debug("main_parser config:\n" + pprint.pformat(
            {arg: getattr(args, arg)
             for arg in vars(args)}) + "\n")
        logger.debug(SEP)
    trajectories, ref_traj = load_trajectories(args)
    # Optionally merge all loaded trajectories into a single one.
    if args.merge:
        if args.subcommand == "kitti":
            die("Can't merge KITTI files.")
        if len(trajectories) == 0:
            die("No trajectories to merge (excluding --ref).")
        trajectories = {
            "merged_trajectory": trajectory.merge(trajectories.values())
        }
    # Optionally shift all timestamps by a constant offset.
    if args.t_offset:
        logger.debug(SEP)
        for name, traj in trajectories.items():
            if type(traj) is trajectory.PosePath3D:
                die("{} doesn't have timestamps - can't add time offset.".
                    format(name))
            logger.info("Adding time offset to {}: {} (s)".format(
                name, args.t_offset))
            traj.timestamps += args.t_offset
    if args.n_to_align != -1 and not (args.align or args.correct_scale):
        die("--n_to_align is useless without --align or/and --correct_scale")
    # TODO: this is fugly, but is a quick solution for remembering each synced
    # reference when plotting pose correspondences later...
    synced = (args.subcommand == "kitti" and ref_traj) or any(
        (args.sync, args.align, args.correct_scale, args.align_origin))
    synced_refs = {}
    if synced:
        from evo.core import sync
        if not args.ref:
            logger.debug(SEP)
            die("Can't align or sync without a reference! (--ref) *grunt*")
        for name, traj in trajectories.items():
            if args.subcommand == "kitti":
                # KITTI poses are already associated by index.
                ref_traj_tmp = ref_traj
            else:
                logger.debug(SEP)
                ref_traj_tmp, trajectories[name] = sync.associate_trajectories(
                    ref_traj, traj, max_diff=args.t_max_diff,
                    first_name="reference", snd_name=name)
            if args.align or args.correct_scale:
                logger.debug(SEP)
                logger.debug("Aligning {} to reference.".format(name))
                trajectories[name].align(
                    ref_traj_tmp, correct_scale=args.correct_scale,
                    correct_only_scale=args.correct_scale and not args.align,
                    n=args.n_to_align)
            if args.align_origin:
                logger.debug(SEP)
                logger.debug("Aligning {}'s origin to reference.".format(name))
                trajectories[name].align_origin(ref_traj_tmp)
            if SETTINGS.plot_pose_correspondences:
                synced_refs[name] = ref_traj_tmp
    # Optionally apply a left- or right-multiplicative transformation.
    if args.transform_left or args.transform_right:
        tf_type = "left" if args.transform_left else "right"
        tf_path = args.transform_left \
            if args.transform_left else args.transform_right
        transform = file_interface.load_transform_json(tf_path)
        logger.debug(SEP)
        if not lie.is_se3(transform):
            logger.warning("Not a valid SE(3) transformation!")
        if args.invert_transform:
            transform = lie.se3_inverse(transform)
        logger.debug("Applying a {}-multiplicative transformation:\n{}".format(
            tf_type, transform))
        for traj in trajectories.values():
            traj.transform(transform, right_mul=args.transform_right,
                           propagate=args.propagate_transform)
    for name, traj in trajectories.items():
        print_traj_info(
            to_compact_name(name, args), traj, args.verbose, args.full_check)
    if args.ref:
        print_traj_info(to_compact_name(args.ref, args), ref_traj,
                        args.verbose, args.full_check)
    if args.plot or args.save_plot or args.serialize_plot:
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        plot_collection = plot.PlotCollection("evo_traj - trajectory plot")
        fig_xyz, axarr_xyz = plt.subplots(3, sharex="col", figsize=tuple(
            SETTINGS.plot_figsize))
        fig_rpy, axarr_rpy = plt.subplots(3, sharex="col", figsize=tuple(
            SETTINGS.plot_figsize))
        fig_traj = plt.figure(figsize=tuple(SETTINGS.plot_figsize))
        plot_mode = plot.PlotMode[args.plot_mode]
        ax_traj = plot.prepare_axis(fig_traj, plot_mode)
        # for x-axis alignment starting from 0 with --plot_relative_time
        start_time = None
        if args.ref:
            if isinstance(ref_traj, trajectory.PoseTrajectory3D) \
                    and args.plot_relative_time:
                start_time = ref_traj.timestamps[0]
            short_traj_name = to_compact_name(
                args.ref, args, SETTINGS.plot_usetex)
            plot.traj(ax_traj, plot_mode, ref_traj,
                      style=SETTINGS.plot_reference_linestyle,
                      color=SETTINGS.plot_reference_color,
                      label=short_traj_name,
                      alpha=SETTINGS.plot_reference_alpha,
                      plot_start_end_markers=SETTINGS.plot_start_end_markers)
            plot.draw_coordinate_axes(ax_traj, ref_traj, plot_mode,
                                      SETTINGS.plot_reference_axis_marker_scale)
            plot.traj_xyz(
                axarr_xyz, ref_traj, style=SETTINGS.plot_reference_linestyle,
                color=SETTINGS.plot_reference_color, label=short_traj_name,
                alpha=SETTINGS.plot_reference_alpha,
                start_timestamp=start_time)
            plot.traj_rpy(
                axarr_rpy, ref_traj, style=SETTINGS.plot_reference_linestyle,
                color=SETTINGS.plot_reference_color, label=short_traj_name,
                alpha=SETTINGS.plot_reference_alpha,
                start_timestamp=start_time)
        elif args.plot_relative_time:
            # Use lower bound timestamp as the 0 time if there's no reference.
            if len(trajectories) > 1:
                logger.warning("--plot_relative_time is set for multiple "
                               "trajectories without --ref. "
                               "Using the lowest timestamp as zero time.")
            start_time = min(traj.timestamps[0]
                             for _, traj in trajectories.items())
        cmap_colors = None
        if SETTINGS.plot_multi_cmap.lower() != "none":
            cmap = getattr(cm, SETTINGS.plot_multi_cmap)
            cmap_colors = iter(cmap(np.linspace(0, 1, len(trajectories))))
        for name, traj in trajectories.items():
            if cmap_colors is None:
                # FIX: Axes._get_lines.prop_cycler was removed in
                # matplotlib 3.8 - use the get_next_color() accessor,
                # which is available in older versions as well.
                color = ax_traj._get_lines.get_next_color()
            else:
                color = next(cmap_colors)
            short_traj_name = to_compact_name(name, args, SETTINGS.plot_usetex)
            plot.traj(ax_traj, plot_mode, traj,
                      SETTINGS.plot_trajectory_linestyle, color,
                      short_traj_name, alpha=SETTINGS.plot_trajectory_alpha,
                      plot_start_end_markers=SETTINGS.plot_start_end_markers)
            plot.draw_coordinate_axes(ax_traj, traj, plot_mode,
                                      SETTINGS.plot_axis_marker_scale)
            if ref_traj and synced and SETTINGS.plot_pose_correspondences:
                plot.draw_correspondence_edges(
                    ax_traj, traj, synced_refs[name], plot_mode, color=color,
                    style=SETTINGS.plot_pose_correspondences_linestyle,
                    alpha=SETTINGS.plot_trajectory_alpha)
            plot.traj_xyz(axarr_xyz, traj, SETTINGS.plot_trajectory_linestyle,
                          color, short_traj_name,
                          alpha=SETTINGS.plot_trajectory_alpha,
                          start_timestamp=start_time)
            plot.traj_rpy(axarr_rpy, traj, SETTINGS.plot_trajectory_linestyle,
                          color, short_traj_name,
                          alpha=SETTINGS.plot_trajectory_alpha,
                          start_timestamp=start_time)
        if not SETTINGS.plot_usetex:
            fig_rpy.text(0., 0.005, "euler_angle_sequence: {}".format(
                SETTINGS.euler_angle_sequence), fontsize=6)
        if args.ros_map_yaml:
            plot.ros_map(ax_traj, args.ros_map_yaml, plot_mode)
        plot_collection.add_figure("trajectories", fig_traj)
        plot_collection.add_figure("xyz_view", fig_xyz)
        plot_collection.add_figure("rpy_view", fig_rpy)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logger.info(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logger.info(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
    if args.save_as_tum:
        logger.info(SEP)
        for name, traj in trajectories.items():
            dest = to_filestem(name, args) + ".tum"
            file_interface.write_tum_trajectory_file(
                dest, traj, confirm_overwrite=not args.no_warnings)
        if args.ref:
            dest = to_filestem(args.ref, args) + ".tum"
            file_interface.write_tum_trajectory_file(
                dest, ref_traj, confirm_overwrite=not args.no_warnings)
    if args.save_as_kitti:
        logger.info(SEP)
        for name, traj in trajectories.items():
            dest = to_filestem(name, args) + ".kitti"
            file_interface.write_kitti_poses_file(
                dest, traj, confirm_overwrite=not args.no_warnings)
        if args.ref:
            dest = to_filestem(args.ref, args) + ".kitti"
            file_interface.write_kitti_poses_file(
                dest, ref_traj, confirm_overwrite=not args.no_warnings)
    if args.save_as_bag or args.save_as_bag2:
        from rosbags.rosbag1 import Writer as Rosbag1Writer
        from rosbags.rosbag2 import Writer as Rosbag2Writer
        writers = []
        if args.save_as_bag:
            dest_bag_path = str(
                datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".bag"
            writers.append(Rosbag1Writer(dest_bag_path))
        if args.save_as_bag2:
            # ROS 2 bags are directories, no file extension here.
            dest_bag_path = str(
                datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
            writers.append(Rosbag2Writer(dest_bag_path))
        for writer in writers:
            logger.info(SEP)
            logger.info("Saving trajectories to " + str(writer.path) + "...")
            try:
                writer.open()
                for name, traj in trajectories.items():
                    dest_topic = to_topic_name(name, args)
                    frame_id = traj.meta[
                        "frame_id"] if "frame_id" in traj.meta else ""
                    file_interface.write_bag_trajectory(writer, traj,
                                                        dest_topic, frame_id)
                if args.ref:
                    dest_topic = to_topic_name(args.ref, args)
                    frame_id = ref_traj.meta[
                        "frame_id"] if "frame_id" in ref_traj.meta else ""
                    file_interface.write_bag_trajectory(writer, ref_traj,
                                                        dest_topic, frame_id)
            finally:
                # Always close the bag, even if writing fails midway.
                writer.close()
    if args.save_table:
        from evo.tools import pandas_bridge
        logger.debug(SEP)
        df = pandas_bridge.trajectories_stats_to_df(trajectories)
        pandas_bridge.save_df_as_table(df, args.save_table,
                                       confirm_overwrite=not args.no_warnings)
if __name__ == '__main__':
    # Direct-execution fallback; normally invoked via the installed
    # console-script entry point.
    from evo import entry_points
    entry_points.traj()
| 25,808 | 43.042662 | 87 | py |
evo | evo-master/evo/core/__init__.py | 0 | 0 | 0 | py | |
evo | evo-master/evo/core/filters.py | """
filter algorithms
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import typing
import numpy as np
from evo import EvoException
from evo.core import geometry
from evo.core import lie_algebra as lie
logger = logging.getLogger(__name__)
class FilterException(EvoException):
    """Raised for invalid pair filtering parameters."""
    pass
IdPairs = typing.List[typing.Tuple[int, int]]


def filter_pairs_by_index(poses: typing.Sequence[np.ndarray], delta: int,
                          all_pairs: bool = False) -> IdPairs:
    """
    filters pairs in a list of SE(3) poses by their index distance
    :param poses: list of SE(3) poses
    :param delta: the index distance used for filtering
    :param all_pairs: use all pairs instead of consecutive pairs
    :return: list of index tuples of the filtered pairs
    """
    num_poses = len(poses)
    if all_pairs:
        # Every start index whose partner index still exists.
        return [(i, i + delta) for i in np.arange(num_poses)
                if i + delta < num_poses]
    # Consecutive pairs of the sub-sampled index sequence 0, delta, 2*delta...
    sampled_ids = np.arange(0, num_poses, delta)
    return [(i, j) for i, j in zip(sampled_ids, sampled_ids[1:])]
def filter_pairs_by_path(poses: typing.Sequence[np.ndarray], delta: float,
                         tol: float = 0.0, all_pairs: bool = False) -> IdPairs:
    """
    filters pairs in a list of SE(3) poses by their path distance in meters
    - the accumulated, traveled path distance between the two pair points
      is considered
    :param poses: list of SE(3) poses
    :param delta: the path distance in meters used for filtering
    :param tol: absolute path tolerance to accept or reject pairs
                in all_pairs mode
    :param all_pairs: use all pairs instead of consecutive pairs
    :return: list of index tuples of the filtered pairs
    """
    if all_pairs:
        positions = np.array([pose[:3, 3] for pose in poses])
        distances = geometry.accumulated_distances(positions)
        id_pairs = []
        for i in range(distances.size - 1):
            offset = i + 1
            # Pick the later pose whose accumulated distance from pose i
            # is closest to delta; accept it only within the tolerance.
            distances_from_here = distances[offset:] - distances[i]
            candidate_index = int(
                np.argmin(np.abs(distances_from_here - delta)))
            if np.abs(distances_from_here[candidate_index] - delta) <= tol:
                id_pairs.append((i, candidate_index + offset))
        return id_pairs
    # Consecutive mode: cut the trajectory each time the traveled distance
    # since the previous cut reaches delta, then pair up the cut indices.
    cut_ids = []
    traveled = 0.0
    last_position = poses[0][:3, 3]
    for i, pose in enumerate(poses):
        position = pose[:3, 3]
        traveled += float(np.linalg.norm(position - last_position))
        last_position = position
        if traveled >= delta:
            cut_ids.append(i)
            traveled = 0.0
    return [(i, j) for i, j in zip(cut_ids, cut_ids[1:])]
def filter_pairs_by_angle(poses: typing.Sequence[np.ndarray], delta: float,
                          tol: float = 0.0, degrees: bool = False,
                          all_pairs: bool = False) -> IdPairs:
    """
    filters pairs in a list of SE(3) poses by their relative angle
    - by default, the angle accumulated on the path between the two pair
      poses is considered
    - if <all_pairs> is set to True, the direct angle between the two pair
      poses is considered
    :param poses: list of SE(3) poses
    :param delta: the angle in radians used for filtering
    :param tol: absolute angle tolerance to accept or reject pairs
                in all_pairs mode
    :param degrees: set to True if <delta> is in degrees instead of radians
    :param all_pairs: use all pairs instead of consecutive pairs
    :return: list of index tuples of the filtered pairs
    :raises FilterException: if delta is outside the valid angle range
    """
    # Angle-axis angles are within [0, pi] / [0, 180] (Euler theorem).
    bounds = [0., 180.] if degrees else [0, np.pi]
    if delta < bounds[0] or delta > bounds[1]:
        raise FilterException(f"delta angle must be within {bounds}")
    # Internally everything is processed in radians.
    delta = np.deg2rad(delta) if degrees else delta
    tol = np.deg2rad(tol) if degrees else tol
    if all_pairs:
        upper_bound = delta + tol
        lower_bound = delta - tol
        id_pairs = []
        ids = list(range(len(poses)))
        # All pairs search is O(n^2) here. Use vectorized operations with
        # scipy.spatial.transform.Rotation for quicker processing.
        logger.info("Searching all pairs with matching rotation delta,"
                    " this can take a while.")
        start_indices = ids[:-1]
        for i in start_indices:
            if not i % 100:
                # Crude progress output for the quadratic search.
                print(int(i / len(start_indices) * 100), "%", end="\r")
            offset = i + 1
            end_indices = ids[offset:]
            # Batch the relative rotations from pose i to all later poses j,
            # their rotation-vector norms are the delta angles.
            rotations_i = lie.sst_rotation_from_matrix(
                np.array([poses[i][:3, :3]] * len(end_indices)))
            rotations_j = lie.sst_rotation_from_matrix(
                np.array([poses[j][:3, :3] for j in end_indices]))
            delta_angles = np.linalg.norm(
                (rotations_i.inv() * rotations_j).as_rotvec(), axis=1)
            # Accept every pair whose delta angle lies within the tolerance.
            matches = np.argwhere((lower_bound <= delta_angles)
                                  & (delta_angles <= upper_bound)) + offset
            id_pairs.extend([(i, j) for j in matches.flatten().tolist()])
    else:
        # Consecutive mode: accumulate the angle along the path and start a
        # new pair whenever the accumulated angle reaches delta.
        delta_angles = [
            lie.so3_log_angle(lie.relative_so3(p1[:3, :3], p2[:3, :3]))
            for p1, p2 in zip(poses, poses[1:])
        ]
        accumulated_delta = 0.0
        current_start_index = 0
        id_pairs = []
        for i, current_delta in enumerate(delta_angles):
            end_index = i + 1
            accumulated_delta += current_delta
            if accumulated_delta >= delta:
                id_pairs.append((current_start_index, end_index))
                accumulated_delta = 0.0
                current_start_index = end_index
    return id_pairs
| 6,409 | 38.813665 | 79 | py |
evo | evo-master/evo/core/geometry.py | """
Provides generic geometry algorithms.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import typing
import numpy as np
from evo import EvoException
class GeometryException(EvoException):
    """Raised when a geometric computation is infeasible."""
    pass
UmeyamaResult = typing.Tuple[np.ndarray, np.ndarray, float]


def umeyama_alignment(x: np.ndarray, y: np.ndarray,
                      with_scale: bool = False) -> UmeyamaResult:
    """
    Computes the least squares solution parameters of an Sim(m) matrix
    that minimizes the distance between a set of registered points.
    Umeyama, Shinji: Least-squares estimation of transformation parameters
    between two point patterns. IEEE PAMI, 1991
    :param x: mxn matrix of points, m = dimension, n = nr. of data points
    :param y: mxn matrix of points, m = dimension, n = nr. of data points
    :param with_scale: set to True to align also the scale (default: 1.0 scale)
    :return: r, t, c - rotation matrix, translation vector and scale factor
    :raises GeometryException: if shapes mismatch or the point sets are
        degenerate (covariance rank too low)
    """
    if x.shape != y.shape:
        raise GeometryException("data matrices must have the same shape")
    # m = dimension, n = nr. of data points
    m, n = x.shape
    # means, eq. 34 and 35
    mean_x = x.mean(axis=1)
    mean_y = y.mean(axis=1)
    # variance, eq. 36
    # "transpose" for column subtraction
    sigma_x = 1.0 / n * (np.linalg.norm(x - mean_x[:, np.newaxis])**2)
    # covariance matrix, eq. 38
    # (vectorized: equals the mean of np.outer over all centered point pairs)
    cov_xy = 1.0 / n * (y - mean_y[:, np.newaxis]).dot(
        (x - mean_x[:, np.newaxis]).T)
    # SVD (text betw. eq. 38 and 39)
    u, d, v = np.linalg.svd(cov_xy)
    if np.count_nonzero(d > np.finfo(d.dtype).eps) < m - 1:
        raise GeometryException("Degenerate covariance rank, "
                                "Umeyama alignment is not possible")
    # S matrix, eq. 43
    s = np.eye(m)
    if np.linalg.det(u) * np.linalg.det(v) < 0.0:
        # Ensure a RHS coordinate system (Kabsch algorithm).
        s[m - 1, m - 1] = -1
    # rotation, eq. 40
    r = u.dot(s).dot(v)
    # scale & translation, eq. 42 and 41
    c = 1 / sigma_x * np.trace(np.diag(d).dot(s)) if with_scale else 1.0
    t = mean_y - np.multiply(c, r.dot(mean_x))
    return r, t, c
def arc_len(x: np.ndarray) -> float:
    """
    :param x: nxm array of points, m=dimension
    :return: the (discrete approximated) arc-length of the point sequence
    """
    segment_lengths = np.linalg.norm(np.diff(x, axis=0), axis=1)
    return np.sum(segment_lengths)
def accumulated_distances(x: np.ndarray) -> np.ndarray:
    """
    :param x: nxm array of points, m=dimension
    :return: the accumulated distances along the point sequence
    """
    segment_lengths = np.linalg.norm(np.diff(x, axis=0), axis=1)
    # Prepend zero: the first point has traveled no distance yet.
    return np.concatenate((np.array([0]), np.cumsum(segment_lengths)))
| 3,396 | 31.663462 | 79 | py |
evo | evo-master/evo/core/lie_algebra.py | # -*- coding: UTF8 -*-
"""
Provides functions for Lie group calculations.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import typing
import numpy as np
import scipy.spatial.transform as sst
from distutils.version import LooseVersion
from scipy import __version__ as scipy_version
from evo import EvoException
from evo.core import transformations as tr
# scipy.spatial.transform.Rotation.*_matrix() was introduced in 1.4,
# which is not available for Python 2.7.
# Use the legacy direct cosine matrix naming (*_dcm()) if needed.
# TODO: remove this junk once Python 2.7 is finally dead in ROS.
_USE_DCM_NAME = LooseVersion(scipy_version) < LooseVersion("1.4")
class LieAlgebraException(EvoException):
    """Raised on invalid Lie group elements or operations."""
    pass
def sst_rotation_from_matrix(so3_matrices: np.ndarray):
    """
    Helper for creating scipy.spatial.transform.Rotation
    from 1..n SO(3) matrices.
    :return: scipy.spatial.transform.Rotation
    """
    # Legacy scipy (< 1.4) used the "direct cosine matrix" naming.
    factory = sst.Rotation.from_dcm if _USE_DCM_NAME \
        else sst.Rotation.from_matrix
    return factory(so3_matrices)
def hat(v: np.ndarray) -> np.ndarray:
    """
    :param v: 3x1 vector
    :return: 3x3 skew symmetric matrix
    """
    skew = np.zeros((3, 3))
    skew[0, 1], skew[0, 2] = -v[2], v[1]
    skew[1, 0], skew[1, 2] = v[2], -v[0]
    skew[2, 0], skew[2, 1] = -v[1], v[0]
    return skew
def vee(m: np.ndarray) -> np.ndarray:
    """
    :param m: 3x3 skew symmetric matrix
    :return: 3x1 vector
    """
    x, y, z = -m[1, 2], m[0, 2], -m[0, 1]
    return np.array([x, y, z])
def so3_exp(rotation_vector: np.ndarray):
    """
    Computes an SO(3) matrix from a rotation vector representation.
    :param rotation_vector: 3x1 rotation vector (axis * angle)
    :return: SO(3) rotation matrix (matrix exponential of so(3))
    """
    rotation = sst.Rotation.from_rotvec(rotation_vector)
    # Legacy scipy (< 1.4) used the "direct cosine matrix" naming.
    return rotation.as_dcm() if _USE_DCM_NAME else rotation.as_matrix()
def so3_log(r: np.ndarray, return_skew: bool = False) -> np.ndarray:
    """
    :param r: SO(3) rotation matrix
    :param return_skew: return skew symmetric Lie algebra element
    :return:
        rotation vector (axis * angle)
        or if return_skew is True:
        3x3 skew symmetric logarithmic map in so(3) (Ma, Soatto eq. 2.8)
    :raises LieAlgebraException: if r is not a valid SO(3) matrix
    """
    if not is_so3(r):
        raise LieAlgebraException("matrix is not a valid SO(3) group element")
    rotation_vector = sst_rotation_from_matrix(r).as_rotvec()
    return hat(rotation_vector) if return_skew else rotation_vector
def so3_log_angle(r: np.ndarray, degrees: bool = False) -> float:
    """
    :param r: SO(3) rotation matrix
    :param degrees: whether to return in degrees, default is radians
    :return: the rotation angle of the logarithmic map
    """
    angle_rad = np.linalg.norm(so3_log(r, return_skew=False))
    return float(np.rad2deg(angle_rad) if degrees else angle_rad)
def se3(r: np.ndarray = np.eye(3),
        t: np.ndarray = np.array([0, 0, 0])) -> np.ndarray:
    """
    :param r: SO(3) rotation matrix
    :param t: 3x1 translation vector
    :return: SE(3) transformation matrix
    """
    # Avoid shadowing the function name with the local result.
    pose = np.eye(4)
    pose[:3, :3] = r
    pose[:3, 3] = t
    return pose
def sim3(r: np.ndarray, t: np.ndarray, s: float) -> np.ndarray:
    """
    :param r: SO(3) rotation matrix
    :param t: 3x1 translation vector
    :param s: positive, non-zero scale factor
    :return: Sim(3) similarity transformation matrix
    """
    matrix = np.eye(4)
    matrix[:3, :3] = np.multiply(s, r)
    matrix[:3, 3] = t
    return matrix
def so3_from_se3(p: np.ndarray) -> np.ndarray:
    """
    :param p: absolute SE(3) pose
    :return: the SO(3) rotation matrix in p
    """
    rotation = p[:3, :3]
    return rotation
def se3_inverse(p: np.ndarray) -> np.ndarray:
    """
    :param p: absolute SE(3) pose
    :return: the inverted pose
    """
    # Closed form: [R t; 0 1]^-1 = [R^T  -R^T t; 0 1]
    inverse = np.eye(4)
    r_inv = p[:3, :3].transpose()
    inverse[:3, :3] = r_inv
    inverse[:3, 3] = -r_inv.dot(p[:3, 3])
    return inverse
def sim3_inverse(a: np.ndarray) -> np.ndarray:
    """
    :param a: Sim(3) matrix in form:
              s*R  t
               0   1
    :return: inverse Sim(3) matrix
    """
    # det(s*R) = s^3 * det(R) and det(R) = 1 for a rotation,
    # so the scale is recovered as s = det(s*R) ^ (1/3).
    s = np.power(np.linalg.det(a[:3, :3]), 1 / 3)
    r_inv = (a[:3, :3] / s).T
    inverse = np.eye(4)
    inverse[:3, :3] = r_inv / s
    inverse[:3, 3] = -r_inv.dot(a[:3, 3] / s)
    return inverse
def is_so3(r: np.ndarray) -> bool:
    """
    :param r: a 3x3 matrix
    :return: True if r is in the SO(3) group
    """
    # A proper rotation has determinant +1 and is orthogonal (R^T R = I).
    has_unit_det = np.allclose(np.linalg.det(r), [1.0], atol=1e-6)
    is_orthogonal = np.allclose(r.transpose().dot(r), np.eye(3), atol=1e-6)
    return has_unit_det and is_orthogonal
def is_se3(p: np.ndarray) -> bool:
    """
    :param p: a 4x4 matrix
    :return: True if p is in the SE(3) group
    """
    bottom_row_ok = np.equal(p[3, :], np.array([0.0, 0.0, 0.0, 1.0])).all()
    return is_so3(p[:3, :3]) and bool(bottom_row_ok)
def is_sim3(p: np.ndarray, s: float) -> bool:
    """
    :param p: a 4x4 matrix
    :param s: expected scale factor
    :return: True if p is in the Sim(3) group with scale s
    """
    # Removing the expected scale must leave a proper rotation.
    descaled_rot = np.multiply(p[:3, :3], 1.0 / s)
    bottom_row_ok = np.equal(p[3, :], np.array([0.0, 0.0, 0.0, 1.0])).all()
    return is_so3(descaled_rot) and bool(bottom_row_ok)
def relative_so3(r1: np.ndarray, r2: np.ndarray) -> np.ndarray:
    """
    :param r1, r2: SO(3) matrices
    :return: the relative rotation r1^{-1} * r2
    """
    # For a rotation, the transpose is the inverse.
    r1_inv = r1.transpose()
    return np.dot(r1_inv, r2)
def relative_se3(p1: np.ndarray, p2: np.ndarray) -> np.ndarray:
    """
    :param p1, p2: SE(3) matrices
    :return: the relative transformation p1^{-1} * p2
    """
    p1_inv = se3_inverse(p1)
    return np.dot(p1_inv, p2)
def random_so3() -> np.ndarray:
    """
    :return: a random SO(3) matrix (for debugging)
    """
    random_homogeneous = tr.random_rotation_matrix()
    return random_homogeneous[:3, :3]
def random_se3() -> np.ndarray:
    """
    :return: a random SE(3) matrix (for debugging)
    """
    return se3(random_so3(), tr.random_vector(3))
evo | evo-master/evo/core/metrics.py | # -*- coding: UTF8 -*-
"""
Provides metrics for the evaluation of SLAM algorithms.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import abc
import logging
import math
import sys
import typing
from enum import Enum
import numpy as np
from evo import EvoException
from evo.core import filters, trajectory
from evo.core.result import Result
from evo.core import lie_algebra as lie
# Compatibility shim: abc.ABC exists only since Python 3.4; construct an
# equivalent base class via the metaclass on older interpreters.
if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})
logger = logging.getLogger(__name__)
# (reference, estimate) trajectory tuple consumed by the metrics below.
PathPair = typing.Tuple[trajectory.PosePath3D, trajectory.PosePath3D]
class MetricsException(EvoException):
    """Raised on invalid metric parameters or malformed input data."""
    pass
class StatisticsType(Enum):
    """Summary statistics that can be computed over a metric's error array."""
    rmse = "rmse"
    mean = "mean"
    median = "median"
    std = "std"
    min = "min"
    max = "max"
    sse = "sse"
class PoseRelation(Enum):
    """Which aspect of a pose pair is compared to compute the error."""
    full_transformation = "full transformation"
    translation_part = "translation part"
    rotation_part = "rotation part"
    rotation_angle_rad = "rotation angle in radians"
    rotation_angle_deg = "rotation angle in degrees"
    point_distance = "point distance"
    point_distance_error_ratio = "point distance error ratio"
class Unit(Enum):
    """Units of measurement for metric values and deltas."""
    none = "unit-less"
    meters = "m"
    seconds = "s"
    degrees = "deg"
    radians = "rad"
    frames = "frames"
    percent = "%"  # used like a unit for display purposes
class VelUnit(Enum):
    """Units of measurement for velocity values."""
    meters_per_sec = "m/s"
    rad_per_sec = "rad/s"
    degrees_per_sec = "deg/s"
class Metric(ABC):
    """Abstract interface that all metrics implement."""
    @abc.abstractmethod
    def process_data(self, data):
        """Compute the metric from the given input data."""
        return
    @abc.abstractmethod
    def get_statistic(self, statistics_type):
        """Return a single summary statistic of the computed error."""
        return
    @abc.abstractmethod
    def get_all_statistics(self):
        """Return all supported summary statistics."""
        return
    @abc.abstractmethod
    def get_result(self):
        """Return the outcome wrapped in a result container."""
        return
class PE(Metric):
    """
    Abstract base class of pose error metrics.
    Subclasses fill self.error in process_data(); the statistics getters
    then summarize that error array.
    """
    def __init__(self):
        self.unit = Unit.none
        self.error = np.array([])
    def __str__(self) -> str:
        return "PE metric base class"
    @abc.abstractmethod
    def process_data(self, data):
        return
    def get_statistic(self, statistics_type: StatisticsType) -> float:
        """
        :param statistics_type: the statistic to compute over self.error
        :return: the requested statistic as a plain Python float
        :raises MetricsException: for unsupported statistics types
        """
        if statistics_type == StatisticsType.rmse:
            squared_errors = np.power(self.error, 2)
            return math.sqrt(np.mean(squared_errors))
        elif statistics_type == StatisticsType.sse:
            squared_errors = np.power(self.error, 2)
            # Consistency: cast numpy scalars to float like mean/std do.
            return float(np.sum(squared_errors))
        elif statistics_type == StatisticsType.mean:
            return float(np.mean(self.error))
        elif statistics_type == StatisticsType.median:
            return float(np.median(self.error))
        elif statistics_type == StatisticsType.max:
            return float(np.max(self.error))
        elif statistics_type == StatisticsType.min:
            return float(np.min(self.error))
        elif statistics_type == StatisticsType.std:
            return float(np.std(self.error))
        else:
            raise MetricsException("unsupported statistics_type")
    def get_all_statistics(self) -> typing.Dict[str, float]:
        """
        :return: a dictionary {StatisticsType.value : float}
        """
        statistics = {}
        for s in StatisticsType:
            try:
                statistics[s.value] = self.get_statistic(s)
            except MetricsException as e:
                # Some metrics support only a subset of the statistics.
                if "unsupported statistics_type" not in str(e):
                    raise
        return statistics
    def get_result(self, ref_name: str = "reference",
                   est_name: str = "estimate") -> Result:
        """
        Wrap the result in Result object.
        :param ref_name: optional, label of the reference data
        :param est_name: optional, label of the estimated data
        :return: evo.core.result.Result with info, stats and the error array
        """
        result = Result()
        metric_name = self.__class__.__name__
        result.add_info({
            "title": str(self),
            "ref_name": ref_name,
            "est_name": est_name,
            "label": "{} {}".format(metric_name,
                                    "({})".format(self.unit.value))
        })
        result.add_stats(self.get_all_statistics())
        if hasattr(self, "error"):
            result.add_np_array("error_array", self.error)
        return result
class RPE(PE):
    """
    RPE: relative pose error
    metric for investigating the odometry drift
    """
    def __init__(self,
                 pose_relation: PoseRelation = PoseRelation.translation_part,
                 delta: float = 1.0, delta_unit: Unit = Unit.frames,
                 rel_delta_tol: float = 0.1, all_pairs: bool = False,
                 pairs_from_reference: bool = False):
        """
        :param pose_relation: which part of the relative pose is compared
        :param delta: pair distance, interpreted according to delta_unit
        :param delta_unit: unit of the delta value
        :param rel_delta_tol: tolerance to accept or reject pair deltas
            (all_pairs mode)
        :param all_pairs: use all possible pairs instead of consecutive ones
        :param pairs_from_reference: determine the pairs on the reference
            instead of the estimated trajectory
        :raises MetricsException: for invalid delta values
        """
        if delta < 0:
            raise MetricsException("delta must be a positive number")
        # A frame-based delta must be a whole number of frames.
        if delta_unit == Unit.frames and not isinstance(delta, int) \
                and not delta.is_integer():
            raise MetricsException(
                "delta must be integer for delta unit {}".format(delta_unit))
        self.delta = int(delta) if delta_unit == Unit.frames else delta
        self.delta_unit = delta_unit
        self.rel_delta_tol = rel_delta_tol
        self.pose_relation = pose_relation
        self.all_pairs = all_pairs
        self.pairs_from_reference = pairs_from_reference
        # E: relative error poses, error: scalar error value per pair.
        self.E: typing.List[np.ndarray] = []
        self.error = np.array([])
        # Index of the second pose of each compared pair (e.g. for plotting).
        self.delta_ids: typing.List[int] = []
        # The unit of the error values depends on the chosen pose relation.
        if pose_relation in (PoseRelation.translation_part,
                             PoseRelation.point_distance):
            self.unit = Unit.meters
        elif pose_relation == PoseRelation.point_distance_error_ratio:
            self.unit = Unit.percent
        elif pose_relation == PoseRelation.rotation_angle_deg:
            self.unit = Unit.degrees
        elif pose_relation == PoseRelation.rotation_angle_rad:
            self.unit = Unit.radians
        else:
            # dimension-less
            self.unit = Unit.none
    def __str__(self) -> str:
        title = "RPE w.r.t. {} ({})\nfor delta = {} ({})".format(
            self.pose_relation.value, self.unit.value, self.delta,
            self.delta_unit.value)
        if self.all_pairs:
            title += " using all pairs"
        else:
            title += " using consecutive pairs"
        return title
    @staticmethod
    def rpe_base(Q_i: np.ndarray, Q_i_delta: np.ndarray, P_i: np.ndarray,
                 P_i_delta: np.ndarray) -> np.ndarray:
        """
        Computes the relative SE(3) error pose for a single pose pair
        following the notation of the TUM RGB-D paper.
        :param Q_i: reference SE(3) pose at i
        :param Q_i_delta: reference SE(3) pose at i+delta
        :param P_i: estimated SE(3) pose at i
        :param P_i_delta: estimated SE(3) pose at i+delta
        :return: the RPE matrix E_i in SE(3)
        """
        Q_rel = lie.relative_se3(Q_i, Q_i_delta)
        P_rel = lie.relative_se3(P_i, P_i_delta)
        E_i = lie.relative_se3(Q_rel, P_rel)
        return E_i
    def process_data(self, data: PathPair) -> None:
        """
        Calculates the RPE on a batch of SE(3) poses from trajectories.
        :param data: tuple (traj_ref, traj_est) with:
        traj_ref: reference evo.trajectory.PosePath or derived
        traj_est: estimated evo.trajectory.PosePath or derived
        :raises MetricsException: on malformed input data
        """
        if len(data) != 2:
            raise MetricsException(
                "please provide data tuple as: (traj_ref, traj_est)")
        traj_ref, traj_est = data
        if traj_ref.num_poses != traj_est.num_poses:
            raise MetricsException(
                "trajectories must have same number of poses")
        # Determine the pose pairs to compare according to delta settings.
        id_pairs = id_pairs_from_delta(
            (traj_ref.poses_se3
             if self.pairs_from_reference else traj_est.poses_se3), self.delta,
            self.delta_unit, self.rel_delta_tol, all_pairs=self.all_pairs)
        # Store flat id list e.g. for plotting.
        self.delta_ids = [j for i, j in id_pairs]
        if self.pose_relation in (PoseRelation.point_distance,
                                  PoseRelation.point_distance_error_ratio):
            # Only compares the magnitude of the point distance instead of
            # doing the full vector comparison of 'translation_part'.
            # Can be directly calculated on positions instead of full poses.
            ref_distances = np.array([
                np.linalg.norm(traj_ref.positions_xyz[i] -
                               traj_ref.positions_xyz[j]) for i, j in id_pairs
            ])
            est_distances = np.array([
                np.linalg.norm(traj_est.positions_xyz[i] -
                               traj_est.positions_xyz[j]) for i, j in id_pairs
            ])
            self.error = np.abs(ref_distances - est_distances)
            if self.pose_relation == PoseRelation.point_distance_error_ratio:
                # Skip pairs with zero reference distance to avoid division
                # by zero; warn about how many were dropped.
                nonzero = ref_distances.nonzero()[0]
                if nonzero.size != ref_distances.size:
                    logger.warning(
                        f"Ignoring {ref_distances.size - nonzero.size} zero "
                        "divisions in ratio calculations.")
                    self.delta_ids = [self.delta_ids[i] for i in nonzero]
                self.error = np.divide(self.error[nonzero],
                                       ref_distances[nonzero]) * 100
        else:
            # All other pose relations require the full pose error.
            self.E = [
                self.rpe_base(traj_ref.poses_se3[i], traj_ref.poses_se3[j],
                              traj_est.poses_se3[i], traj_est.poses_se3[j])
                for i, j in id_pairs
            ]
            logger.debug(
                "Compared {} relative pose pairs, delta = {} ({}) {}".format(
                    len(self.E), self.delta, self.delta_unit.value,
                    ("with all pairs." if self.all_pairs \
                        else "with consecutive pairs.")))
        logger.debug("Calculating RPE for {} pose relation...".format(
            self.pose_relation.value))
        if self.pose_relation in (PoseRelation.point_distance,
                                  PoseRelation.point_distance_error_ratio):
            # Already computed, see above.
            pass
        elif self.pose_relation == PoseRelation.translation_part:
            self.error = [np.linalg.norm(E_i[:3, 3]) for E_i in self.E]
        elif self.pose_relation == PoseRelation.rotation_part:
            # ideal: rot(E_i) = 3x3 identity
            self.error = np.array([
                np.linalg.norm(lie.so3_from_se3(E_i) - np.eye(3))
                for E_i in self.E
            ])
        elif self.pose_relation == PoseRelation.full_transformation:
            # ideal: E_i = 4x4 identity
            self.error = np.array(
                [np.linalg.norm(E_i - np.eye(4)) for E_i in self.E])
        elif self.pose_relation == PoseRelation.rotation_angle_rad:
            self.error = np.array(
                [abs(lie.so3_log_angle(E_i[:3, :3])) for E_i in self.E])
        elif self.pose_relation == PoseRelation.rotation_angle_deg:
            self.error = np.array(
                [abs(lie.so3_log_angle(E_i[:3, :3], True)) for E_i in self.E])
        else:
            raise MetricsException("unsupported pose_relation: ",
                                   self.pose_relation)
class APE(PE):
    """
    APE: absolute pose error
    metric for investigating the global consistency of a SLAM trajectory
    """
    def __init__(self,
                 pose_relation: PoseRelation = PoseRelation.translation_part):
        self.pose_relation = pose_relation
        self.E: typing.List[np.ndarray] = []
        self.error = np.array([])
        # The unit of the error values is determined by the pose relation.
        unit_of_relation = {
            PoseRelation.translation_part: Unit.meters,
            PoseRelation.point_distance: Unit.meters,
            PoseRelation.rotation_angle_deg: Unit.degrees,
            PoseRelation.rotation_angle_rad: Unit.radians,
        }
        # dimension-less for all remaining relations
        self.unit = unit_of_relation.get(pose_relation, Unit.none)
    def __str__(self) -> str:
        unit_suffix = "(" + self.unit.value + ")" if self.unit else ""
        return "APE w.r.t. " + str(self.pose_relation.value) + " " + unit_suffix
    @staticmethod
    def ape_base(x_t: np.ndarray, x_t_star: np.ndarray) -> np.ndarray:
        """
        Computes the absolute error pose for a single SE(3) pose pair
        following the notation of the Kümmerle paper.
        :param x_t: estimated absolute pose at t
        :param x_t_star: reference absolute pose at t
        :return: the delta pose
        """
        return lie.relative_se3(x_t, x_t_star)
    def process_data(self, data: PathPair) -> None:
        """
        Calculates the APE on a batch of SE(3) poses from trajectories.
        :param data: tuple (traj_ref, traj_est) with:
        traj_ref: reference evo.trajectory.PosePath or derived
        traj_est: estimated evo.trajectory.PosePath or derived
        """
        if len(data) != 2:
            raise MetricsException(
                "please provide data tuple as: (traj_ref, traj_est)")
        traj_ref, traj_est = data
        if traj_ref.num_poses != traj_est.num_poses:
            raise MetricsException(
                "trajectories must have same number of poses")
        translation_based = self.pose_relation in (
            PoseRelation.translation_part, PoseRelation.point_distance)
        if translation_based:
            # Position deltas are enough here, full SE(3) math is not needed.
            self.E = traj_est.positions_xyz - traj_ref.positions_xyz
        else:
            self.E = [
                self.ape_base(x_t, x_t_star) for x_t, x_t_star in zip(
                    traj_est.poses_se3, traj_ref.poses_se3)
            ]
        logger.debug("Compared {} absolute pose pairs.".format(len(self.E)))
        logger.debug("Calculating APE for {} pose relation...".format(
            (self.pose_relation.value)))
        if translation_based:
            # E holds position difference vectors in this case.
            self.error = np.array([np.linalg.norm(diff) for diff in self.E])
        elif self.pose_relation == PoseRelation.rotation_part:
            # ideal: rotation part of E_i equals the 3x3 identity
            self.error = np.array([
                np.linalg.norm(lie.so3_from_se3(E_i) - np.eye(3))
                for E_i in self.E
            ])
        elif self.pose_relation == PoseRelation.full_transformation:
            # ideal: E_i equals the 4x4 identity
            self.error = np.array(
                [np.linalg.norm(E_i - np.eye(4)) for E_i in self.E])
        elif self.pose_relation in (PoseRelation.rotation_angle_rad,
                                    PoseRelation.rotation_angle_deg):
            as_degrees = self.pose_relation == PoseRelation.rotation_angle_deg
            self.error = np.array([
                abs(lie.so3_log_angle(E_i[:3, :3], as_degrees))
                for E_i in self.E
            ])
        else:
            raise MetricsException("unsupported pose_relation")
def id_pairs_from_delta(poses: typing.Sequence[np.ndarray], delta: float,
                        delta_unit: Unit, rel_tol: float = 0.1,
                        all_pairs: bool = False) -> filters.IdPairs:
    """
    High-level function: selects index tuples of pose pairs that are
    separated by the given delta (in frames, path distance or angle).
    :param poses: list of SE(3) poses
    :param delta: the interval step for indices
    :param delta_unit: unit of delta (metrics.Unit enum member)
    :param rel_tol: relative tolerance to accept or reject deltas
    :param all_pairs: use all pairs instead of consecutive pairs
    :return: list of index tuples (pairs)
    """
    tolerance = delta * rel_tol
    if delta_unit == Unit.frames:
        id_pairs = filters.filter_pairs_by_index(poses, int(delta), all_pairs)
    elif delta_unit == Unit.meters:
        id_pairs = filters.filter_pairs_by_path(poses, delta, tolerance,
                                                all_pairs)
    elif delta_unit in {Unit.degrees, Unit.radians}:
        id_pairs = filters.filter_pairs_by_angle(poses, delta, tolerance,
                                                 delta_unit == Unit.degrees,
                                                 all_pairs)
    else:
        raise filters.FilterException(
            "unsupported delta unit: {}".format(delta_unit))
    if not id_pairs:
        raise filters.FilterException(
            "delta = {} ({}) produced an empty index list - try lower values "
            "or a less strict tolerance".format(delta, delta_unit.value))
    pairing_info = ("using all pairs."
                    if all_pairs else "using consecutive pairs.")
    logger.debug(
        "Found {} pairs with delta {} ({}) among {} poses ".format(
            len(id_pairs), delta, delta_unit.value, len(poses)) + pairing_info)
    return id_pairs
| 17,664 | 37.739035 | 79 | py |
evo | evo-master/evo/core/result.py | # -*- coding: UTF8 -*-
"""
container class for results
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import logging
import typing
import numpy as np
from evo import EvoException
from evo.core.trajectory import PosePath3D
logger = logging.getLogger(__name__)
class ResultException(EvoException):
    """Raised when Result objects cannot be processed or merged."""
    pass
class Result(object):
    """
    Generic container for metric evaluation results.

    Holds four dictionaries:
      - info: metadata describing the result (e.g. "title")
      - stats: scalar statistics, keyed by name
      - np_arrays: raw numpy arrays (e.g. per-pose error values)
      - trajectories: named trajectory objects related to the result
    """
    def __init__(self):
        self.info = {}
        self.stats = {}
        self.np_arrays = {}
        self.trajectories = {}
    def __str__(self) -> str:
        return self.pretty_str(stats=True)
    def __eq__(self, other: object) -> bool:
        """
        Full content equality. Numpy arrays are compared element-wise and
        the key sets of both np_arrays dicts must match exactly - an extra
        array on either side makes the results unequal.
        """
        if not isinstance(other, Result):
            return False
        if self.info != other.info or self.stats != other.stats \
                or self.trajectories != other.trajectories:
            return False
        # Symmetric key check: the previous one-directional loop missed
        # extra keys that only exist in other.np_arrays.
        if self.np_arrays.keys() != other.np_arrays.keys():
            return False
        return all(
            np.array_equal(self.np_arrays[key], other.np_arrays[key])
            for key in self.np_arrays)
    def __ne__(self, other: object) -> bool:
        return not self == other
    def pretty_str(self, title=True, stats=True, info=False) -> str:
        """
        Formatted multi-line string representation.
        :param title: include self.info["title"] if present
        :param stats: include the scalar statistics
        :param info: include the info dict entries
        """
        p_str = ""
        if title and "title" in self.info:
            p_str += "{}\n\n".format(self.info["title"])
        if stats:
            for name, val in sorted(self.stats.items()):
                p_str += "{:>10}\t{:.6f}\n".format(name, val)
        if info:
            for name, val in sorted(self.info.items()):
                p_str += "{:>10}\t{}\n".format(name, val)
        return p_str
    def add_np_array(self, name: str, array: np.ndarray) -> None:
        self.np_arrays[name] = array
    def add_info(self, info_dict: dict) -> None:
        self.info.update(info_dict)
    def add_stats(self, stats_dict: dict) -> None:
        self.stats.update(stats_dict)
    def add_trajectory(self, name: str, traj: "PosePath3D") -> None:
        # String annotation: behaviorally identical, but avoids evaluating
        # the name at definition time.
        self.trajectories[name] = traj
def merge_results(results: typing.Sequence[Result]) -> Result:
    """
    Merges multiple Result objects into a single one.
    "stats" values are replaced by their arithmetic mean over all inputs.
    "np_arrays" entries are averaged element-wise if all inputs have matching
    array sizes, otherwise the raw values are appended instead.
    The "info" dict and trajectories of the first result are kept as-is.
    :param results: non-empty sequence of Result objects with matching keys
    :return: merged Result object
    :raises ValueError: if the input is empty or contains non-Result objects
    :raises ResultException: if the stats/np_arrays keys don't match
    """
    if not results:
        raise ValueError("no results to merge")
    if not all(isinstance(r, Result) for r in results):
        # Previously this case raised the misleading "no results to merge".
        raise ValueError("all elements to be merged must be Result objects")
    if len(results) == 1:
        return results[0]
    # Check if all results share keys for "stats" and "np_arrays" dicts.
    dict_lists = [[r.np_arrays for r in results], [r.stats for r in results]]
    for dicts in dict_lists:
        if not all(a.keys() == b.keys() for a, b in zip(dicts, dicts[1:])):
            raise ResultException("can't merge results with non-matching keys")
    # Determine merge strategy for the raw arrays:
    strategy = "average"
    length_lists = [[a.size for a in r.np_arrays.values()] for r in results]
    if not all(a == b for a, b in zip(length_lists, length_lists[1:])):
        logger.warning("Appending raw value arrays due to different lengths.")
        strategy = "append"
    else:
        logger.info("Averaging raw values of input results in merged result.")
    merged_result = copy.deepcopy(results[0])
    logger.warning("Using info dict of first result.")
    # Take the mean over ALL results at once. The previous pairwise running
    # average ((a+b)/2 folded repeatedly) weighted earlier results with
    # exponentially decaying factors instead of computing the true mean.
    merged_result.stats = {
        key: float(np.mean([r.stats[key] for r in results]))
        for key in merged_result.stats
    }
    for key in merged_result.np_arrays:
        arrays = [r.np_arrays[key] for r in results]
        if strategy == "average":
            merged_result.np_arrays[key] = np.mean(arrays, axis=0)
        else:
            # Flatten before concatenating, like np.append did, so arrays
            # of different shapes can still be appended.
            merged_result.np_arrays[key] = np.concatenate(
                [a.ravel() for a in arrays])
    return merged_result
| 4,320 | 32.757813 | 79 | py |
evo | evo-master/evo/core/sync.py | # -*- coding: UTF8 -*-
"""
Provides algorithms for time synchronization.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import logging
import typing
import numpy as np
from evo import EvoException
from evo.core.trajectory import PoseTrajectory3D
logger = logging.getLogger(__name__)
class SyncException(EvoException):
    """Raised when trajectories cannot be time-synchronized."""
    pass
# Type aliases used by the functions below:
# matching index lists of two stamp arrays, and a pair of synced trajectories.
MatchingIndices = typing.Tuple[typing.List[int], typing.List[int]]
TrajectoryPair = typing.Tuple[PoseTrajectory3D, PoseTrajectory3D]
def matching_time_indices(stamps_1: np.ndarray, stamps_2: np.ndarray,
                          max_diff: float = 0.01,
                          offset_2: float = 0.0) -> "MatchingIndices":
    """
    Searches for the best matching timestamps of two lists of timestamps
    and returns the list indices of the best matches.
    :param stamps_1: first vector of timestamps (numpy array or sequence)
    :param stamps_2: second vector of timestamps (numpy array or sequence)
    :param max_diff: max. allowed absolute time difference
    :param offset_2: optional time offset to be applied to stamps_2
    :return: 2 lists of the matching timestamp indices (stamps_1, stamps_2)
    """
    matching_indices_1: typing.List[int] = []
    matching_indices_2: typing.List[int] = []
    # Non-mutating shift instead of deepcopy + in-place add: cheaper, leaves
    # the caller's array untouched and also accepts plain sequences.
    stamps_2 = np.asarray(stamps_2) + offset_2
    # Guard the degenerate case - np.argmin on an empty array would raise.
    if stamps_2.size == 0:
        return matching_indices_1, matching_indices_2
    for index_1, stamp_1 in enumerate(stamps_1):
        diffs = np.abs(stamps_2 - stamp_1)
        index_2 = int(np.argmin(diffs))
        if diffs[index_2] <= max_diff:
            matching_indices_1.append(index_1)
            matching_indices_2.append(index_2)
    return matching_indices_1, matching_indices_2
def associate_trajectories(
        traj_1: PoseTrajectory3D, traj_2: PoseTrajectory3D,
        max_diff: float = 0.01, offset_2: float = 0.0,
        first_name: str = "first trajectory",
        snd_name: str = "second trajectory") -> TrajectoryPair:
    """
    Synchronizes two trajectories by matching their timestamps.
    :param traj_1: trajectory.PoseTrajectory3D object of first trajectory
    :param traj_2: trajectory.PoseTrajectory3D object of second trajectory
    :param max_diff: max. allowed absolute time difference for associating
    :param offset_2: optional time offset of second trajectory
    :param first_name: name of first trajectory for verbose logging
    :param snd_name: name of second trajectory for verbose/debug logging
    :return: traj_1, traj_2 (synchronized)
    """
    if not (isinstance(traj_1, PoseTrajectory3D)
            and isinstance(traj_2, PoseTrajectory3D)):
        raise SyncException("trajectories must be PoseTrajectory3D objects")
    # Match each pose of the shorter trajectory against the longer one.
    # The offset sign is flipped if the roles are swapped, so that it is
    # always applied relative to the second trajectory.
    snd_longer = len(traj_2.timestamps) > len(traj_1.timestamps)
    if snd_longer:
        traj_short, traj_long = copy.deepcopy(traj_1), copy.deepcopy(traj_2)
        offset_long = offset_2
    else:
        traj_short, traj_long = copy.deepcopy(traj_2), copy.deepcopy(traj_1)
        offset_long = -offset_2
    max_pairs = len(traj_short.timestamps)
    ids_short, ids_long = matching_time_indices(traj_short.timestamps,
                                                traj_long.timestamps,
                                                max_diff, offset_long)
    if len(ids_short) != len(ids_long):
        raise SyncException(
            "matching_time_indices returned unequal number of indices")
    num_matches = len(ids_long)
    traj_short.reduce_to_ids(ids_short)
    traj_long.reduce_to_ids(ids_long)
    traj_1 = traj_short if snd_longer else traj_long
    traj_2 = traj_long if snd_longer else traj_short
    if num_matches == 0:
        raise SyncException(
            "found no matching timestamps between {} and {} with max. time "
            "diff {} (s) and time offset {} (s)".format(
                first_name, snd_name, max_diff, offset_2))
    logger.debug(
        "Found {} of max. {} possible matching timestamps between...\n"
        "\t{}\nand:\t{}\n..with max. time diff.: {} (s) "
        "and time offset: {} (s).".format(num_matches, max_pairs, first_name,
                                          snd_name, max_diff, offset_2))
    return traj_1, traj_2
| 4,637 | 38.641026 | 79 | py |
evo | evo-master/evo/core/trajectory.py | # -*- coding: UTF8 -*-
"""
some functions for trajectories
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import typing
import numpy as np
from evo import EvoException
import evo.core.transformations as tr
import evo.core.geometry as geometry
from evo.core import lie_algebra as lie
logger = logging.getLogger(__name__)
class TrajectoryException(EvoException):
    """Raised for invalid or inconsistent trajectory data."""
    pass
class PosePath3D(object):
    """
    just a path, no temporal information
    also: base class for real trajectory

    The three pose representations (positions, quaternions, SE(3) matrices)
    are cached lazily: only the ones passed to the constructor are stored,
    the others are derived on first property access.
    """
    def __init__(
            self, positions_xyz: typing.Optional[np.ndarray] = None,
            orientations_quat_wxyz: typing.Optional[np.ndarray] = None,
            poses_se3: typing.Optional[typing.Sequence[np.ndarray]] = None,
            meta: typing.Optional[dict] = None):
        """
        :param positions_xyz: nx3 list of x,y,z positions
        :param orientations_quat_wxyz: nx4 list of quaternions (w,x,y,z format)
        :param poses_se3: list of SE(3) poses
        :param meta: optional metadata
        :raises TrajectoryException: if no usable pose data was provided
        """
        # Either both positions & orientations, or the SE(3) poses must exist.
        if (positions_xyz is None
                or orientations_quat_wxyz is None) and poses_se3 is None:
            raise TrajectoryException("must provide at least positions_xyz "
                                      "& orientations_quat_wxyz or poses_se3")
        if positions_xyz is not None:
            self._positions_xyz = np.array(positions_xyz)
        if orientations_quat_wxyz is not None:
            self._orientations_quat_wxyz = np.array(orientations_quat_wxyz)
        if poses_se3 is not None:
            self._poses_se3 = poses_se3
        if self.num_poses == 0:
            raise TrajectoryException("pose data is empty")
        self.meta = {} if meta is None else meta
    def __str__(self) -> str:
        return "{} poses, {:.3f}m path length".format(self.num_poses,
                                                      self.path_length)
    def __eq__(self, other: object) -> bool:
        # Numerical equality of all representations (within np.allclose tol).
        if not isinstance(other, PosePath3D):
            return False
        if not self.num_poses == other.num_poses:
            return False
        equal = True
        equal &= all([
            np.allclose(p1, p2)
            for p1, p2 in zip(self.poses_se3, other.poses_se3)
        ])
        # q and -q represent the same rotation, so accept either sign.
        equal &= (np.allclose(self.orientations_quat_wxyz,
                              other.orientations_quat_wxyz)
                  or np.allclose(self.orientations_quat_wxyz,
                                 -other.orientations_quat_wxyz))
        equal &= np.allclose(self.positions_xyz, other.positions_xyz)
        return equal
    def __ne__(self, other: object) -> bool:
        return not self == other
    @property
    def positions_xyz(self) -> np.ndarray:
        # nx3 positions; derived once from the SE(3) poses if not yet cached.
        if not hasattr(self, "_positions_xyz"):
            assert hasattr(self, "_poses_se3")
            self._positions_xyz = np.array([p[:3, 3] for p in self._poses_se3])
        return self._positions_xyz
    @property
    def distances(self) -> np.ndarray:
        # Accumulated path distances from the start, one value per pose.
        return geometry.accumulated_distances(self.positions_xyz)
    @property
    def orientations_quat_wxyz(self) -> np.ndarray:
        # nx4 quaternions (w,x,y,z); derived from SE(3) poses if not cached.
        if not hasattr(self, "_orientations_quat_wxyz"):
            assert hasattr(self, "_poses_se3")
            self._orientations_quat_wxyz \
                = np.array(
                    [tr.quaternion_from_matrix(p)
                     for p in self._poses_se3])
        return self._orientations_quat_wxyz
    def get_orientations_euler(self, axes="sxyz") -> np.ndarray:
        """
        :param axes: axis convention string as used by transformations.py
        :return: nx3 array of Euler angles
        """
        # Prefer the SE(3) matrices if available, otherwise the quaternions.
        if hasattr(self, "_poses_se3"):
            return np.array(
                [tr.euler_from_matrix(p, axes=axes) for p in self._poses_se3])
        assert hasattr(self, "_orientations_quat_wxyz")
        return np.array([
            tr.euler_from_quaternion(q, axes=axes)
            for q in self._orientations_quat_wxyz
        ])
    @property
    def poses_se3(self) -> typing.Sequence[np.ndarray]:
        # List of 4x4 SE(3) matrices; built once from positions + quaternions
        # if not cached yet.
        if not hasattr(self, "_poses_se3"):
            assert hasattr(self, "_positions_xyz")
            assert hasattr(self, "_orientations_quat_wxyz")
            self._poses_se3 \
                = xyz_quat_wxyz_to_se3_poses(self.positions_xyz,
                                             self.orientations_quat_wxyz)
        return self._poses_se3
    @property
    def num_poses(self) -> int:
        # Use whichever representation is already materialized.
        if hasattr(self, "_poses_se3"):
            return len(self._poses_se3)
        else:
            return self.positions_xyz.shape[0]
    @property
    def path_length(self) -> float:
        """
        calculates the path length (arc-length)
        :return: path length in meters
        """
        return float(geometry.arc_len(self.positions_xyz))
    def transform(self, t: np.ndarray, right_mul: bool = False,
                  propagate: bool = False) -> None:
        """
        apply a left or right multiplicative transformation to the whole path
        :param t: a 4x4 transformation matrix (e.g. SE(3) or Sim(3))
        :param right_mul: whether to apply it right-multiplicative or not
        :param propagate: whether to propagate drift with RHS transformations
        """
        if right_mul and not propagate:
            # Transform each pose individually.
            self._poses_se3 = [np.dot(p, t) for p in self.poses_se3]
        elif right_mul and propagate:
            # Transform each pose and propagate resulting drift to the next.
            ids = np.arange(0, self.num_poses, 1)
            # Relative poses between consecutive poses, each perturbed by t.
            rel_poses = [
                lie.relative_se3(self.poses_se3[i], self.poses_se3[j]).dot(t)
                for i, j in zip(ids, ids[1:])
            ]
            # Re-accumulate the chain starting from the first (unchanged) pose.
            self._poses_se3 = [self.poses_se3[0]]
            for i, j in zip(ids[:-1], ids):
                self._poses_se3.append(self._poses_se3[j].dot(rel_poses[i]))
        else:
            self._poses_se3 = [np.dot(t, p) for p in self.poses_se3]
        # Refresh the cached position/quaternion representations.
        self._positions_xyz, self._orientations_quat_wxyz \
            = se3_poses_to_xyz_quat_wxyz(self.poses_se3)
    def scale(self, s: float) -> None:
        """
        apply a scaling to the whole path
        :param s: scale factor
        """
        # Scale only the translation parts; rotations are unaffected.
        if hasattr(self, "_poses_se3"):
            self._poses_se3 = [
                lie.se3(p[:3, :3], s * p[:3, 3]) for p in self._poses_se3
            ]
        if hasattr(self, "_positions_xyz"):
            self._positions_xyz = s * self._positions_xyz
    def align(self, traj_ref: 'PosePath3D', correct_scale: bool = False,
              correct_only_scale: bool = False,
              n: int = -1) -> geometry.UmeyamaResult:
        """
        align to a reference trajectory using Umeyama alignment
        :param traj_ref: reference trajectory
        :param correct_scale: set to True to adjust also the scale
        :param correct_only_scale: set to True to correct the scale, but not the pose
        :param n: the number of poses to use, counted from the start (default: all)
        :return: the result parameters of the Umeyama algorithm
        """
        with_scale = correct_scale or correct_only_scale
        if correct_only_scale:
            logger.debug("Correcting scale...")
        else:
            logger.debug("Aligning using Umeyama's method..." +
                         (" (with scale correction)" if with_scale else ""))
        # Umeyama works on 3xn point matrices, hence the transposes.
        if n == -1:
            r_a, t_a, s = geometry.umeyama_alignment(self.positions_xyz.T,
                                                     traj_ref.positions_xyz.T,
                                                     with_scale)
        else:
            r_a, t_a, s = geometry.umeyama_alignment(
                self.positions_xyz[:n, :].T, traj_ref.positions_xyz[:n, :].T,
                with_scale)
        if not correct_only_scale:
            logger.debug("Rotation of alignment:\n{}"
                         "\nTranslation of alignment:\n{}".format(r_a, t_a))
        logger.debug("Scale correction: {}".format(s))
        # Apply scale and/or rigid transformation depending on the flags.
        if correct_only_scale:
            self.scale(s)
        elif correct_scale:
            self.scale(s)
            self.transform(lie.se3(r_a, t_a))
        else:
            self.transform(lie.se3(r_a, t_a))
        return r_a, t_a, s
    def align_origin(self, traj_ref: 'PosePath3D') -> np.ndarray:
        """
        align the origin to the origin of a reference trajectory
        :param traj_ref: reference trajectory
        :return: the used transformation
        """
        if self.num_poses == 0 or traj_ref.num_poses == 0:
            raise TrajectoryException("can't align an empty trajectory...")
        traj_origin = self.poses_se3[0]
        traj_ref_origin = traj_ref.poses_se3[0]
        # Transformation that maps this path's first pose onto the
        # reference's first pose.
        to_ref_origin = np.dot(traj_ref_origin, lie.se3_inverse(traj_origin))
        logger.debug(
            "Origin alignment transformation:\n{}".format(to_ref_origin))
        self.transform(to_ref_origin)
        return to_ref_origin
    def reduce_to_ids(
            self, ids: typing.Union[typing.Sequence[int], np.ndarray]) -> None:
        """
        reduce the elements to the ones specified in ids
        :param ids: list of integer indices
        """
        # Only the representations that are materialized need to be reduced.
        if hasattr(self, "_positions_xyz"):
            self._positions_xyz = self._positions_xyz[ids]
        if hasattr(self, "_orientations_quat_wxyz"):
            self._orientations_quat_wxyz = self._orientations_quat_wxyz[ids]
        if hasattr(self, "_poses_se3"):
            self._poses_se3 = [self._poses_se3[idx] for idx in ids]
    def check(self) -> typing.Tuple[bool, dict]:
        """
        checks if the data is valid
        :return: True/False, dictionary with some detailed infos
        """
        if self.num_poses == 0:
            return True, {}
        # All three representations must describe the same number of poses.
        same_len = self.positions_xyz.shape[0] \
            == self.orientations_quat_wxyz.shape[0] \
            == len(self.poses_se3)
        se3_valid = all([lie.is_se3(p) for p in self.poses_se3])
        # Rotations must be represented by unit quaternions.
        norms = np.linalg.norm(self.orientations_quat_wxyz, axis=1)
        quat_normed = np.allclose(norms, np.ones(norms.shape))
        valid = same_len and se3_valid and quat_normed
        details = {
            "array shapes": "ok"
            if same_len else "invalid (lists must have same length)",
            "SE(3) conform": "yes"
            if se3_valid else "no (poses are not valid SE(3) matrices)",
            "quaternions": "ok"
            if quat_normed else "invalid (must be unit quaternions)"
        }
        return valid, details
    def get_infos(self) -> dict:
        """
        :return: dictionary with some infos about the path
        """
        return {
            "nr. of poses": self.num_poses,
            "path length (m)": self.path_length,
            "pos_start (m)": self.positions_xyz[0],
            "pos_end (m)": self.positions_xyz[-1]
        }
    def get_statistics(self) -> dict:
        """
        :return: dictionary with statistics (none defined for plain paths,
                 see PoseTrajectory3D for time-based statistics)
        """
        if self.num_poses < 2:
            return {}
        return {}  # no idea yet
class PoseTrajectory3D(PosePath3D, object):
    """
    a PosePath with temporal information
    """
    def __init__(
            self, positions_xyz: typing.Optional[np.ndarray] = None,
            orientations_quat_wxyz: typing.Optional[np.ndarray] = None,
            timestamps: typing.Optional[np.ndarray] = None,
            poses_se3: typing.Optional[typing.Sequence[np.ndarray]] = None,
            meta: typing.Optional[dict] = None):
        """
        :param timestamps: nx1 list of timestamps
                           (declared optional only to keep the parameter
                           order, but actually required here)
        :raises TrajectoryException: if no timestamps were provided
        """
        super(PoseTrajectory3D,
              self).__init__(positions_xyz, orientations_quat_wxyz, poses_se3,
                             meta)
        # Timestamps are mandatory despite the Optional annotation above.
        if timestamps is None:
            raise TrajectoryException("no timestamps provided")
        self.timestamps = np.array(timestamps)
    def __str__(self) -> str:
        s = super(PoseTrajectory3D, self).__str__()
        return s + ", {:.3f}s duration".format(self.timestamps[-1] -
                                               self.timestamps[0])
    def __eq__(self, other: object) -> bool:
        # Path equality from the base class plus matching timestamps.
        if not isinstance(other, PoseTrajectory3D):
            return False
        if not self.num_poses == other.num_poses:
            return False
        equal = super(PoseTrajectory3D, self).__eq__(other)
        equal &= np.allclose(self.timestamps, other.timestamps)
        return equal
    def __ne__(self, other: object) -> bool:
        return not self == other
    @property
    def speeds(self) -> np.ndarray:
        """
        :return: array with speed of motion between poses
                 (empty if there are fewer than two poses)
        """
        if self.num_poses < 2:
            return np.array([])
        return np.array([
            calc_speed(self.positions_xyz[i], self.positions_xyz[i + 1],
                       self.timestamps[i], self.timestamps[i + 1])
            for i in range(len(self.positions_xyz) - 1)
        ])
    def reduce_to_ids(
            self, ids: typing.Union[typing.Sequence[int], np.ndarray]) -> None:
        """
        reduce the elements (incl. timestamps) to the ones specified in ids
        :param ids: list of integer indices
        """
        super(PoseTrajectory3D, self).reduce_to_ids(ids)
        self.timestamps = self.timestamps[ids]
    def reduce_to_time_range(self,
                             start_timestamp: typing.Optional[float] = None,
                             end_timestamp: typing.Optional[float] = None):
        """
        Removes elements with timestamps outside of the specified time range.
        :param start_timestamp: any data with lower timestamp is removed
                                if None: current start timestamp
        :param end_timestamp: any data with larger timestamp is removed
                              if None: current end timestamp
        """
        if self.num_poses == 0:
            raise TrajectoryException("trajectory is empty")
        if start_timestamp is None:
            start_timestamp = self.timestamps[0]
        if end_timestamp is None:
            end_timestamp = self.timestamps[-1]
        if start_timestamp > end_timestamp:
            raise TrajectoryException(
                "start_timestamp is greater than end_timestamp "
                "({} > {})".format(start_timestamp, end_timestamp))
        # Keep only the indices inside the closed interval [start, end].
        ids = np.where(
            np.logical_and(self.timestamps >= start_timestamp,
                           self.timestamps <= end_timestamp))[0]
        self.reduce_to_ids(ids)
    def check(self) -> typing.Tuple[bool, dict]:
        """
        checks if the data (incl. timestamps) is valid
        :return: True/False, dictionary with some detailed infos
        """
        if self.num_poses == 0:
            return True, {}
        valid, details = super(PoseTrajectory3D, self).check()
        len_stamps_valid = (len(self.timestamps) == len(self.positions_xyz))
        valid &= len_stamps_valid
        details["nr. of stamps"] = "ok" if len_stamps_valid else "wrong"
        # Timestamps must be strictly ascending: sorted and without
        # duplicates.
        stamps_ascending = bool(
            np.all(np.sort(self.timestamps) == self.timestamps))
        stamps_ascending &= np.unique(self.timestamps).size == len(
            self.timestamps)
        valid &= stamps_ascending
        if stamps_ascending:
            details["timestamps"] = "ok"
        else:
            details["timestamps"] = "wrong, not ascending or duplicates"
        return valid, details
    def get_infos(self) -> dict:
        """
        :return: dictionary with some infos about the trajectory
        """
        infos = super(PoseTrajectory3D, self).get_infos()
        infos["duration (s)"] = self.timestamps[-1] - self.timestamps[0]
        infos["t_start (s)"] = self.timestamps[0]
        infos["t_end (s)"] = self.timestamps[-1]
        return infos
    def get_statistics(self) -> dict:
        """
        :return: dictionary with some statistics of the trajectory
        """
        if self.num_poses < 2:
            return {}
        stats = super(PoseTrajectory3D, self).get_statistics()
        speeds = self.speeds
        vmax = speeds.max()
        vmin = speeds.min()
        vmean = speeds.mean()
        # Report in both m/s and km/h (factor 3.6).
        stats.update({
            "v_max (m/s)": vmax,
            "v_min (m/s)": vmin,
            "v_avg (m/s)": vmean,
            "v_max (km/h)": vmax * 3.6,
            "v_min (km/h)": vmin * 3.6,
            "v_avg (km/h)": vmean * 3.6
        })
        return stats
class Trajectory(PoseTrajectory3D):
    """Deprecated alias for PoseTrajectory3D."""
    pass  # TODO compat - kept only for backwards compatibility
def calc_speed(xyz_1: np.ndarray, xyz_2: np.ndarray, t_1: float,
               t_2: float) -> float:
    """
    Linear speed of the motion between two positions.
    :param xyz_1: position at timestamp 1
    :param xyz_2: position at timestamp 2
    :param t_1: timestamp 1
    :param t_2: timestamp 2
    :return: speed in m/s
    :raises TrajectoryException: if the timestamps are not increasing
    """
    dt = t_2 - t_1
    if dt <= 0:
        raise TrajectoryException("bad timestamps: {} & {}".format(t_1, t_2))
    return float(np.linalg.norm(xyz_2 - xyz_1) / dt)
def calc_angular_speed(p_1: np.ndarray, p_2: np.ndarray, t_1: float,
                       t_2: float, degrees: bool = False) -> float:
    """
    Angular speed of the motion between two poses, based on the difference
    of the rotation angles of the two poses' rotation parts.
    :param p_1: pose at timestamp 1
    :param p_2: pose at timestamp 2
    :param t_1: timestamp 1
    :param t_2: timestamp 2
    :param degrees: set to True to return deg/s
    :return: speed in rad/s
    :raises TrajectoryException: if the timestamps are not increasing
    """
    dt = t_2 - t_1
    if dt <= 0:
        raise TrajectoryException("bad timestamps: {} & {}".format(t_1, t_2))
    delta_angle = (lie.so3_log(p_2[:3, :3], degrees) -
                   lie.so3_log(p_1[:3, :3], degrees))
    return delta_angle / dt
def xyz_quat_wxyz_to_se3_poses(
        xyz: np.ndarray, quat: np.ndarray) -> typing.Sequence[np.ndarray]:
    """
    Builds a list of SE(3) matrices from positions and quaternions.
    :param xyz: nx3 array of x,y,z positions
    :param quat: nx4 array of quaternions (w,x,y,z format)
    :return: list of 4x4 SE(3) poses
    """
    # Distinct loop names avoid shadowing the parameters.
    return [
        lie.se3(lie.so3_from_se3(tr.quaternion_matrix(q)), t)
        for q, t in zip(quat, xyz)
    ]
def se3_poses_to_xyz_quat_wxyz(
        poses: typing.Sequence[np.ndarray]
) -> typing.Tuple[np.ndarray, np.ndarray]:
    """
    Decomposes a list of SE(3) matrices into positions and quaternions.
    :param poses: list of 4x4 SE(3) poses
    :return: (nx3 position array, nx4 quaternion array in w,x,y,z format)
    """
    translations = np.array([pose[:3, 3] for pose in poses])
    rotations_wxyz = np.array(
        [tr.quaternion_from_matrix(pose) for pose in poses])
    return translations, rotations_wxyz
def merge(trajectories: typing.Sequence[PoseTrajectory3D]) -> PoseTrajectory3D:
    """
    Merges multiple trajectories into a single, timestamp-sorted one.
    :param trajectories: list of PoseTrajectory3D objects
    :return: merged PoseTrajectory3D
    """
    stamps = np.concatenate([t.timestamps for t in trajectories])
    xyz = np.concatenate([t.positions_xyz for t in trajectories])
    quat_wxyz = np.concatenate(
        [t.orientations_quat_wxyz for t in trajectories])
    # Sort everything by timestamp (argsort is stable for equal stamps).
    sort_ids = np.argsort(stamps)
    return PoseTrajectory3D(xyz[sort_ids], quat_wxyz[sort_ids],
                            stamps[sort_ids])
| 19,102 | 37.359438 | 85 | py |
evo | evo-master/evo/core/transformations.py | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.07.18
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.9 <http://www.numpy.org>`_
* `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
# Module metadata.
__version__ = '2015.07.18'
__docformat__ = 'restructuredtext en'
# The public API is intentionally empty; import names explicitly.
__all__ = ()
# TODO evo: added to suppress annoying warnings, see README.md
# Silences the "failed to import module _transformations" warning emitted
# when the optional C accelerator module is unavailable.
import warnings
warnings.filterwarnings("ignore", message="failed to import module _transformations")
def identity_matrix():
    """Return the 4x4 identity transformation matrix.

    The identity leaves every homogeneous coordinate unchanged.
    Returned as a float64 numpy array.
    """
    return numpy.eye(4)
def translation_matrix(direction):
    """Return 4x4 matrix translating points by the given direction vector.

    direction : array_like with at least 3 components; only the first
        three are used and placed in the last column of the matrix.
    """
    matrix = numpy.identity(4)
    matrix[0:3, 3] = direction[0:3]
    return matrix
def translation_from_matrix(matrix):
    """Return the translation component of a homogeneous transformation.

    The result is an independent copy of the first three entries of the
    matrix's last column, so mutating it does not affect the input.
    """
    M = numpy.asarray(matrix)
    return numpy.array(M[:3, 3])
def reflection_matrix(point, normal):
    """Return 4x4 matrix mirroring points across a plane.

    The plane is given by a point on it and its normal vector (the
    normal is re-normalized internally; only the first three components
    of each argument are used).
    """
    n = unit_vector(normal[:3])
    # Householder reflection: I - 2*n*n^T, shifted so the plane point
    # stays fixed.
    R = numpy.identity(4)
    R[:3, :3] = numpy.identity(3) - 2.0 * numpy.outer(n, n)
    R[:3, 3] = 2.0 * numpy.dot(point[:3], n) * n
    return R
def reflection_from_matrix(matrix):
    """Recover mirror-plane point and normal from a reflection matrix.

    Raises ValueError when the matrix lacks the eigenstructure of a
    reflection (an eigenvalue -1 in the linear part and an eigenvalue 1
    in the full matrix).
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    # The plane normal is the eigenvector of the 3x3 part with
    # eigenvalue -1.
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    candidates = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if not len(candidates):
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, candidates[0]]).squeeze()
    # Any fixed point of the full matrix (eigenvalue 1) lies on the
    # mirror plane.
    eigvals, eigvecs = numpy.linalg.eig(M)
    candidates = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if not len(candidates):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, candidates[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return 4x4 matrix rotating by angle (radians) about an axis.

    direction : axis direction vector (normalized internally).
    point : optional point the axis passes through; when omitted the
        axis passes through the origin.
    """
    c = math.cos(angle)
    s = math.sin(angle)
    axis = unit_vector(direction[:3])
    # Rodrigues' formula: R = c*I + (1-c)*a*a^T + s*[a]_x
    R = numpy.diag([c, c, c])
    R += (1.0 - c) * numpy.outer(axis, axis)
    ax, ay, az = axis * s
    R += numpy.array([[0.0, -az, ay],
                      [az, 0.0, -ax],
                      [-ay, ax, 0.0]])
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # Translate so the rotation axis passes through the given point.
        p = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = p - numpy.dot(R, p)
    return M
def rotation_from_matrix(matrix):
    """Recover angle, axis direction and a point on the axis from a
    rotation matrix.

    Raises ValueError when the matrix has no eigenvalue 1 (i.e. is not a
    rotation).
    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # The rotation axis is the real eigenvector of the (transposed)
    # linear part with eigenvalue 1.
    vals, vecs = numpy.linalg.eig(R33.T)
    idx = numpy.where(abs(numpy.real(vals) - 1.0) < 1e-8)[0]
    if not len(idx):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(vecs[:, idx[-1]]).squeeze()
    # A fixed point of the full matrix lies on the rotation axis.
    vals, vecs = numpy.linalg.eig(R)
    idx = numpy.where(abs(numpy.real(vals) - 1.0) < 1e-8)[0]
    if not len(idx):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(vecs[:, idx[-1]]).squeeze()
    point /= point[3]
    # Recover the angle; pick the sine formula using the largest axis
    # component for numerical stability.
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0]
    return math.atan2(sina, cosa), direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix scaling by factor about origin, optionally along a
    single direction.

    With no direction, scaling is uniform; factor -1 gives point
    symmetry. With a direction vector, only that axis is scaled.
    """
    if direction is None:
        # Isotropic scaling about the (optional) origin.
        M = numpy.identity(4) * factor
        M[3, 3] = 1.0
        if origin is not None:
            M[:3, 3] = (1.0 - factor) * numpy.asarray(origin[:3],
                                                      dtype=numpy.float64)
    else:
        # Scale only along the given (normalized) direction.
        d = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(d, d)
        if origin is not None:
            M[:3, 3] = shrink * numpy.dot(origin[:3], d) * d
    return M
def scale_from_matrix(matrix):
    """Recover scaling factor, origin and direction from a scaling matrix.

    direction is None for a uniform scaling. Raises ValueError when the
    matrix has no fixed point (no eigenvalue 1).
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    factor = numpy.trace(M33) - 2.0
    try:
        # Non-uniform case: one eigenvalue of the 3x3 part equals the
        # factor and its eigenvector is the scaling direction.
        vals, vecs = numpy.linalg.eig(M33)
        idx = numpy.where(abs(numpy.real(vals) - factor) < 1e-8)[0][0]
        direction = numpy.real(vecs[:, idx]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # Uniform case: all three eigenvalues equal the factor.
        factor = (factor + 2.0) / 3.0
        direction = None
    # The scaling origin is any fixed point (eigenvalue 1) of the full
    # matrix.
    vals, vecs = numpy.linalg.eig(M)
    idx = numpy.where(abs(numpy.real(vals) - 1.0) < 1e-8)[0]
    if not len(idx):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(vecs[:, idx[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.
    Using either perspective point, projection direction, or none of both.
    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).
    >>> P = projection_matrix([0, 0, 0], [1, 0, 0])
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3-v1[1])
    True
    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection through the given center point
        perspective = numpy.array(perspective[:3], dtype=numpy.float64,
                                  copy=False)
        # diagonal scaled by signed distance of perspective point from plane
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        # bottom row makes the homogeneous w proportional to the
        # distance of the input point from the plane
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along the given direction
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection (project along the plane normal)
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.
    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # An eigenvalue 1 of the full matrix distinguishes parallel/orthogonal
    # projections from (non-pseudo) perspective ones.
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        # perspective center: eigenvector of a nonzero eigenvalue
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # plane normal is encoded (negated) in the bottom row
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustum.
    The frustum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).
    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustum.
    If perspective is True the frustum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).
    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).
    >>> frustum = numpy.random.rand(6)
    >>> frustum[1] += frustum[0]
    >>> frustum[3] += frustum[2]
    >>> frustum[5] += frustum[4]
    >>> M = clip_matrix(perspective=False, *frustum)
    >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(perspective=True, *frustum)
    >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])
    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustum")
    if perspective:
        # a perspective frustum needs a strictly positive near plane
        if near <= _EPS:
            raise ValueError("invalid frustum: near <= 0")
        # OpenGL-style perspective projection; w becomes -z so results
        # must be dehomogenized by the caller
        t = 2.0 * near
        M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
             [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
             [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
             [0.0, 0.0, -1.0, 0.0]]
    else:
        # orthographic box: affine scale-and-shift onto [-1, 1]^3
        M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
             [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
             [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
             [0.0, 0.0, 0.0, 1.0]]
    return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
    """Return matrix shearing by angle along direction on a shear plane.

    The plane is given by a point and its normal; the direction vector
    must lie in the plane (be orthogonal to the normal), otherwise a
    ValueError is raised.
    """
    n = unit_vector(normal[:3])
    d = unit_vector(direction[:3])
    if abs(numpy.dot(n, d)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    t = math.tan(angle)
    # Shear displaces each point along d proportionally to its signed
    # distance from the plane.
    M = numpy.identity(4)
    M[:3, :3] += t * numpy.outer(d, n)
    M[:3, 3] = -t * numpy.dot(point[:3], n) * d
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.
    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    w, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # pick the eigenvector pair whose cross product has the largest
    # norm (most linearly independent, numerically most stable)
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix([1, 2, 3])
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # work on a transposed copy so rows correspond to basis vectors
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    # normalize so the homogeneous scale factor is 1
    M /= M[3, 3]
    P = M.copy()
    P[:, 3] = 0.0, 0.0, 0.0, 1.0
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")
    scale = numpy.zeros((3, ))
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]
    # extract and strip the perspective partition, if any
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0.0, 0.0, 0.0, 1.0
    else:
        perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
    # translation is the last row of the transposed matrix
    translate = M[3, :3].copy()
    M[3, :3] = 0.0
    # a Gram-Schmidt-style pass over the rows peels off scale and shear;
    # the statement order below is essential
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    shear[1:] /= scale[2]
    # negate scale and rotation if the remaining basis is left-handed
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        numpy.negative(scale, scale)
        numpy.negative(row, row)
    # Euler angles (static x, y, z convention) from the rotation rows
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        # gimbal-lock fallback: third angle is arbitrarily set to zero
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Build a transformation matrix from individual transformations.

    Inverse of decompose_matrix. Each argument is optional:
        scale : vector of 3 scaling factors
        shear : shear factors for x-y, x-z, y-z axes
        angles : Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Factors are multiplied in the order perspective, translate, angles,
    shear, scale.
    """
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[0, 1], Z[0, 2], Z[1, 2] = shear[0], shear[1], shear[2]
        factors.append(Z)
    if scale is not None:
        factors.append(numpy.diag([scale[0], scale[1], scale[2], 1.0]))
    M = numpy.identity(4)
    for factor in factors:
        M = numpy.dot(M, factor)
    # normalize the homogeneous scale
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell
    coordinates.

    lengths : cell edge lengths (a, b, c).
    angles : cell angles in degrees.
    The de-orthogonalization matrix is the inverse of the result.
    """
    a, b, c = lengths
    angles = numpy.radians(angles)
    sines = numpy.sin(angles)
    cosines = numpy.cos(angles)
    sina, sinb = sines[0], sines[1]
    cosa, cosb, cosg = cosines
    co = (cosa * cosb - cosg) / (sina * sinb)
    # fill only the nonzero entries of the lower-triangular cell matrix
    M = numpy.zeros((4, 4))
    M[0, 0] = a * sinb * math.sqrt(1.0 - co * co)
    M[1, 0] = -a * sinb * co
    M[1, 1] = b * sina
    M[2, 0] = a * cosb
    M[2, 1] = b * cosa
    M[2, 2] = c
    M[3, 3] = 1.0
    return M
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
    """Return affine transform matrix to register two point sets.
    v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
    coordinates, where ndims is the dimensionality of the coordinate space.
    If shear is False, a similarity transformation matrix is returned.
    If also scale is False, a rigid/Euclidean transformation matrix
    is returned.
    By default the algorithm by Hartley and Zissermann [15] is used.
    If usesvd is True, similarity and Euclidean transformation matrices
    are calculated by minimizing the weighted sum of squared deviations
    (RMSD) according to the algorithm by Kabsch [8].
    Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
    is used, which is slower when using this Python implementation.
    The returned matrix performs rotation, translation and uniform scaling
    (if specified).
    >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
    >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
    >>> affine_matrix_from_points(v0, v1)
    array([[   0.14549,    0.00062,  675.50008],
           [   0.00048,    0.14094,   53.24971],
           [   0.     ,    0.     ,    1.     ]])
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> S = scale_matrix(random.random())
    >>> M = concatenate_matrices(T, R, S)
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
    >>> M = affine_matrix_from_points(v0[:3], v1[:3])
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    More examples in superimposition_matrix()
    """
    # local copies are required: both point sets are mutated below
    v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
    ndims = v0.shape[0]
    if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
        raise ValueError("input arrays are of wrong shape or type")
    # move centroids to origin
    t0 = -numpy.mean(v0, axis=1)
    M0 = numpy.identity(ndims+1)
    M0[:ndims, ndims] = t0
    v0 += t0.reshape(ndims, 1)
    t1 = -numpy.mean(v1, axis=1)
    M1 = numpy.identity(ndims+1)
    M1[:ndims, ndims] = t1
    v1 += t1.reshape(ndims, 1)
    if shear:
        # Affine transformation
        A = numpy.concatenate((v0, v1), axis=0)
        u, s, vh = numpy.linalg.svd(A.T)
        vh = vh[:ndims].T
        B = vh[:ndims]
        C = vh[ndims:2*ndims]
        t = numpy.dot(C, numpy.linalg.pinv(B))
        t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
        M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
    elif usesvd or ndims != 3:
        # Rigid transformation via SVD of covariance matrix
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(ndims+1)
        M[:ndims, :ndims] = R
    else:
        # Rigid transformation matrix via quaternion
        # compute symmetric matrix N
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = [[xx+yy+zz, 0.0, 0.0, 0.0],
             [yz-zy, xx-yy-zz, 0.0, 0.0],
             [zx-xz, xy+yx, yy-xx-zz, 0.0],
             [xy-yx, zx+xz, yz+zy, zz-xx-yy]]
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eigh(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q)  # unit quaternion
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    if scale and not shear:
        # Affine transformation; scale is ratio of RMS deviations from centroid
        v0 *= v0
        v1 *= v1
        M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # move centroids back
    M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
    M /= M[ndims, ndims]
    return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
    """Return matrix transforming one 3D point set onto a second one.

    v0 and v1 are shape (3, N) or (4, N) arrays of at least 3 points;
    only the first three rows are used. scale and usesvd are forwarded
    to affine_matrix_from_points. The result is a similarity or
    Euclidean transformation matrix.
    """
    src = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    dst = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    return affine_matrix_from_points(src, dst, shear=False,
                                     scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple (firstaxis, parity, repetition, frame)
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # i, j, k: indices of the three rotation axes for this convention
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # rotating frame: swap first and last angle
        ai, ak = ak, ai
    if parity:
        # odd permutation of axes: negate all angles
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    # products of sines/cosines shared by several matrix entries
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk
    M = numpy.identity(4)
    if repetition:
        # sequences like 'xyx' where first and last axis coincide
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple
    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple (firstaxis, parity, repetition, frame)
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # i, j, k: indices of the three rotation axes for this convention
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j], M[i, k])
            ay = math.atan2( sy, M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # gimbal lock: middle angle is 0 or pi, only ax+az is determined
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2( sy, M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2( M[j, i], M[i, i])
        else:
            # gimbal lock: middle angle is +-pi/2, only ax-az is determined
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    if parity:
        # undo the angle negation applied by euler_matrix for odd parity
        ax, ay, az = -ax, -ay, -az
    if frame:
        # rotating frame: swap first and last angle back
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.

    >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True
    """
    # Go via the rotation matrix representation.
    rotation = quaternion_matrix(quaternion)
    return euler_from_matrix(rotation, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
    True
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as an encoded tuple (firstaxis, parity, repetition, frame)
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # quaternion component indices (offset by 1: index 0 is the scalar part w)
    i = firstaxis + 1
    j = _NEXT_AXIS[i+parity-1] + 1
    k = _NEXT_AXIS[i-parity] + 1
    if frame:
        # rotating frame: swap first and last angle
        ai, ak = ak, ai
    if parity:
        aj = -aj
    # quaternions use half angles
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk
    q = numpy.empty((4, ))
    if repetition:
        # sequences like 'xyx' where first and last axis coincide
        q[0] = cj*(cc - ss)
        q[i] = cj*(cs + sc)
        q[j] = sj*(cc + ss)
        q[k] = sj*(cs - sc)
    else:
        q[0] = cj*cc + sj*ss
        q[i] = cj*sc - sj*cs
        q[j] = cj*ss + sj*cc
        q[k] = cj*cs - sj*sc
    if parity:
        q[j] *= -1.0
    return q
def quaternion_about_axis(angle, axis):
    """Return quaternion for a rotation of `angle` radians about `axis`.

    >>> q = quaternion_about_axis(0.123, [1, 0, 0])
    >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
    True
    """
    half_angle = angle / 2.0
    q = numpy.array([0.0, axis[0], axis[1], axis[2]])
    norm = vector_norm(q)
    if norm > _EPS:
        # vector part: normalized axis scaled by sin(angle/2)
        q *= math.sin(half_angle) / norm
    q[0] = math.cos(half_angle)
    return q
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    squared_norm = numpy.dot(q, q)
    if squared_norm < _EPS:
        # near-zero quaternion: fall back to identity
        return numpy.identity(4)
    q *= math.sqrt(2.0 / squared_norm)
    outer = numpy.outer(q, q)
    matrix = numpy.identity(4)
    matrix[0, 0] = 1.0 - outer[2, 2] - outer[3, 3]
    matrix[0, 1] = outer[1, 2] - outer[3, 0]
    matrix[0, 2] = outer[1, 3] + outer[2, 0]
    matrix[1, 0] = outer[1, 2] + outer[3, 0]
    matrix[1, 1] = 1.0 - outer[1, 1] - outer[3, 3]
    matrix[1, 2] = outer[2, 3] - outer[1, 0]
    matrix[2, 0] = outer[1, 3] - outer[2, 0]
    matrix[2, 1] = outer[2, 3] + outer[1, 0]
    matrix[2, 2] = 1.0 - outer[1, 1] - outer[2, 2]
    return matrix
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.

    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
    >>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
    ...                quaternion_from_matrix(R, isprecise=True))
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        # Shepperd-style branching on the trace for numerical stability
        q = numpy.empty((4, ))
        t = numpy.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # pick the largest diagonal element to avoid cancellation
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                         [m01+m10, m11-m00-m22, 0.0, 0.0],
                         [m02+m20, m12+m21, m22-m00-m11, 0.0],
                         [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    if q[0] < 0.0:
        # canonicalize sign: keep the scalar part non-negative
        numpy.negative(q, q)
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0 (w, x, y, z).

    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
    >>> numpy.allclose(q, [28, -44, -14, 48])
    True
    """
    w0, x0, y0, z0 = quaternion0
    w1, x1, y1, z1 = quaternion1
    w = w1*w0 - x1*x0 - y1*y0 - z1*z0
    x = w1*x0 + x1*w0 + y1*z0 - z1*y0
    y = w1*y0 - x1*z0 + y1*w0 + z1*x0
    z = w1*z0 + x1*y0 - y1*x0 + z1*w0
    return numpy.array([w, x, y, z], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return the conjugate of a quaternion (vector part negated).

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True
    """
    conjugate = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conjugate[1:] *= -1.0
    return conjugate
def quaternion_inverse(quaternion):
    """Return the multiplicative inverse of a quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True
    """
    # Inverse is the conjugate divided by the squared norm.
    conjugate = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conjugate[1:] *= -1.0
    return conjugate / numpy.dot(conjugate, conjugate)
def quaternion_real(quaternion):
    """Return the scalar (real) part of a quaternion as a float.

    >>> quaternion_real([3, 0, 1, 2])
    3.0
    """
    scalar_part = quaternion[0]
    return float(scalar_part)
def quaternion_imag(quaternion):
    """Return the vector (imaginary) part of a quaternion as a new array.

    >>> quaternion_imag([3, 0, 1, 2])
    array([ 0.,  1.,  2.])
    """
    vector_part = numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
    return vector_part
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.

    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # trivial endpoints: avoid numerical work entirely
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # quaternions are (anti)parallel: nothing to interpolate
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        numpy.negative(q1, q1)
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # weighted sum of endpoints on the unit hypersphere
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return a uniformly distributed random unit quaternion.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1.

    >>> q = random_quaternion()
    >>> numpy.allclose(1, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> len(q.shape), q.shape[0]==4
    (1, True)
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    # Shoemake's subgroup algorithm for uniform sampling on SO(3).
    r1 = numpy.sqrt(1.0 - rand[0])
    r2 = numpy.sqrt(rand[0])
    t1 = 2.0 * math.pi * rand[1]
    t2 = 2.0 * math.pi * rand[2]
    return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
                        numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
    """Return a uniformly distributed random rotation matrix.

    rand: array like
        Three independent random variables that are uniformly distributed
        between 0 and 1 for each returned quaternion.

    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    quat = random_quaternion(rand)
    return quaternion_matrix(quat)
class Arcball(object):
    """Virtual Trackball Control.

    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[1, 0, 0, 0])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1, 1, 0], [-1, 1, 0])
    >>> ball.constrain = True
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.

        initial : quaternion or rotation matrix
        """
        self._axis = None        # active constraint axis while dragging
        self._axes = None        # optional list of allowed constraint axes
        self._radius = 1.0       # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0.0, 0.0, 1.0])  # sphere point at mouse-down
        self._constrain = False
        if initial is None:
            self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
        else:
            # accept either a 4x4 rotation matrix or a unit quaternion
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.

        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    @property
    def constrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    @constrain.setter
    def constrain(self, value):
        """Set state of constrain to axis mode."""
        self._constrain = bool(value)
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # snap the start point onto the nearest allowed constraint axis
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # rotation axis is perpendicular to the down- and current vectors
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # cursor did not move (or moved along the constraint): keep pose
            self._qnow = self._qdown
        else:
            q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # extrapolate past qnow by slerping with fraction > 1
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates for a window coordinate pair."""
    x = (point[0] - center[0]) / radius
    # window y axis points down; flip it for the sphere
    y = (center[1] - point[1]) / radius
    norm_sq = x*x + y*y
    if norm_sq > 1.0:
        # position outside of sphere: project onto the equator
        length = math.sqrt(norm_sq)
        return numpy.array([x/length, y/length, 0.0])
    return numpy.array([x, y, math.sqrt(1.0 - norm_sq)])
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    # project the point onto the plane perpendicular to the axis
    v -= a * numpy.dot(a, v)
    length = vector_norm(v)
    if length > _EPS:
        if v[2] < 0.0:
            # keep the result on the visible (front) hemisphere
            numpy.negative(v, v)
        v /= length
        return v
    # degenerate case: point is parallel to axis, pick any perpendicular
    if a[2] == 1.0:
        return numpy.array([1.0, 0.0, 0.0])
    return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
    """Return the axis whose constraint arc lies nearest to point."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        # score: alignment of the constrained point with the original one
        score = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_axis = candidate
            best_score = score
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# ('s'/'r' prefix = static/rotating frame; e.g. 'sxyz' -> (0, 0, 0, 0))
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# inverse lookup: tuple encoding -> axis string
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3))
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0
    """
    arr = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if arr.ndim == 1:
            # scalar fast path for plain vectors
            return math.sqrt(numpy.dot(arr, arr))
        arr *= arr
        result = numpy.atleast_1d(numpy.sum(arr, axis=axis))
        numpy.sqrt(result, result)
        return result
    # in-place variant: write the norms into `out`, return None
    arr *= arr
    numpy.sum(arr, axis=axis, out=out)
    numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.

    >>> v0 = numpy.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
    True
    >>> v0 = numpy.random.rand(5, 4, 3)
    >>> v1 = unit_vector(v0, axis=-1)
    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
    >>> numpy.allclose(v1, v2)
    True
    >>> v1 = unit_vector(v0, axis=1)
    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
    >>> numpy.allclose(v1, v2)
    True
    >>> v1 = numpy.empty((5, 4, 3))
    >>> unit_vector(v0, axis=1, out=v1)
    >>> numpy.allclose(v1, v2)
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]
    """
    if out is None:
        vec = numpy.array(data, dtype=numpy.float64, copy=True)
        if vec.ndim == 1:
            # scalar fast path for plain vectors
            vec /= math.sqrt(numpy.dot(vec, vec))
            return vec
    else:
        # normalize in place into `out`
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        vec = out
    norm = numpy.atleast_1d(numpy.sum(vec*vec, axis))
    numpy.sqrt(norm, norm)
    if axis is not None:
        # broadcast the norms back over the normalized axis
        norm = numpy.expand_dims(norm, axis)
    vec /= norm
    if out is None:
        return vec
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).

    >>> v = random_vector(10000)
    >>> numpy.all(v >= 0) and numpy.all(v < 1)
    True
    >>> v0 = random_vector(10)
    >>> v1 = random_vector(10)
    >>> numpy.any(v0 == v1)
    False
    """
    values = numpy.random.random(size)
    return values
def vector_product(v0, v1, axis=0):
    """Return the cross product, i.e. a vector perpendicular to both inputs.

    >>> v = vector_product([2, 0, 0], [0, 3, 0])
    >>> numpy.allclose(v, [0, 0, 6])
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> v = vector_product(v0, v1)
    >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> v = vector_product(v0, v1, axis=1)
    >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
    True
    """
    # numpy.cross already handles broadcasting along the given axis
    return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.

    If directed is False, the input vectors are interpreted as undirected axes,
    i.e. the maximum angle is pi/2.

    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> a = angle_between_vectors(v0, v1)
    >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> a = angle_between_vectors(v0, v1, axis=1)
    >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
    True
    """
    a = numpy.array(v0, dtype=numpy.float64, copy=False)
    b = numpy.array(v1, dtype=numpy.float64, copy=False)
    # cosine of the angle from the normalized dot product
    cosine = numpy.sum(a * b, axis=axis)
    cosine /= vector_norm(a, axis=axis) * vector_norm(b, axis=axis)
    if not directed:
        # undirected axes: fold the angle into [0, pi/2]
        cosine = numpy.fabs(cosine)
    return numpy.arccos(cosine)
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix.

    >>> M0 = random_rotation_matrix()
    >>> M1 = inverse_matrix(M0.T)
    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
    True
    >>> for size in range(1, 7):
    ...     M0 = numpy.random.rand(size, size)
    ...     M1 = inverse_matrix(M0)
    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
    """
    # Thin wrapper kept for API compatibility with the C implementation.
    return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
    """Return concatenation of series of transformation matrices.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
    True
    """
    # Left-to-right matrix product, starting from the identity.
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.

    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
    False
    """
    # Normalize both matrices by their homogeneous scale before comparing.
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.
    Return True on successful import.
    """
    import warnings
    from importlib import import_module
    try:
        if not package:
            module = import_module(name)
        else:
            module = import_module('.' + name, package=package)
    except ImportError:
        # module is optional: warn (if enabled) and fall through, returning
        # None so the pure-Python implementations above remain in use
        if warn:
            warnings.warn("failed to import module %s" % name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    # keep the existing (Python) implementation reachable
                    # under the prefixed name, e.g. _py_vector_norm
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("no Python implementation of " + attr)
            # overwrite the global with the imported module's attribute
            globals()[attr] = getattr(module, attr)
        return True
# Load the optional C implementation (_transformations) if available;
# existing pure-Python functions are kept under a '_py_' prefix on clashes.
_import_module('_transformations')
if __name__ == "__main__":
    # run the doctests embedded in the docstrings above
    import doctest
    import random  # used in doctests
    numpy.set_printoptions(suppress=True, precision=5)
    doctest.testmod()
| 66,201 | 33.390649 | 85 | py |
evo | evo-master/evo/tools/__init__.py | 0 | 0 | 0 | py | |
evo | evo-master/evo/tools/file_interface.py | # -*- coding: UTF8 -*-
"""
Low- and high-level read/write functions for different file formats.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import binascii
import csv
import io
import json
import logging
import os
import typing
import zipfile
import numpy as np
from rosbags.rosbag1 import (Reader as Rosbag1Reader, Writer as Rosbag1Writer)
from rosbags.rosbag2 import (Reader as Rosbag2Reader, Writer as Rosbag2Writer)
from rosbags.serde import deserialize_cdr, ros1_to_cdr, serialize_cdr
from rosbags.serde.serdes import cdr_to_ros1
from evo import EvoException
import evo.core.lie_algebra as lie
import evo.core.transformations as tr
from evo.core import result
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
from evo.tools import user, tf_id
logger = logging.getLogger(__name__)
# ROS message types from which this module can extract a pose
# (position + orientation); see read_bag_trajectory.
SUPPORTED_ROS_MSGS = {
    "geometry_msgs/msg/PoseStamped",
    "geometry_msgs/msg/PoseWithCovarianceStamped",
    "geometry_msgs/msg/TransformStamped", "nav_msgs/msg/Odometry"
}
class FileInterfaceException(EvoException):
    """Raised when reading or writing trajectory/pose data fails."""
    pass
def has_utf8_bom(file_path):
    """
    Checks if the given file starts with a UTF8 BOM
    wikipedia.org/wiki/Byte_order_mark
    """
    # A file shorter than the 3-byte BOM cannot contain one.
    if os.path.getsize(file_path) < 3:
        return False
    with open(file_path, 'rb') as f:
        leading_bytes = f.read(3)
    return leading_bytes == b'\xef\xbb\xbf'
def csv_read_matrix(file_path, delim=',', comment_str="#"):
    """
    Directly parse a csv-like file into a matrix of strings.
    :param file_path: path of csv file (or file handle)
    :param delim: delimiter character
    :param comment_str: string indicating a comment line to ignore
    :return: 2D list with raw data (string)
    """
    if hasattr(file_path, 'read'):  # already an open file handle
        data_lines = (line for line in file_path
                      if not line.startswith(comment_str))
        return list(csv.reader(data_lines, delimiter=delim))
    if not os.path.isfile(file_path):
        raise FileInterfaceException("csv file " + str(file_path) +
                                     " does not exist")
    skip_3_bytes = has_utf8_bom(file_path)
    with open(file_path) as f:
        if skip_3_bytes:
            # skip the UTF-8 byte order mark
            f.seek(3)
        data_lines = (line for line in f
                      if not line.startswith(comment_str))
        return list(csv.reader(data_lines, delimiter=delim))
def read_tum_trajectory_file(file_path) -> PoseTrajectory3D:
    """
    Parses a trajectory file in TUM format (timestamp tx ty tz qx qy qz qw).
    :param file_path: the trajectory file path (or file handle)
    :return: trajectory.PoseTrajectory3D object
    """
    raw_mat = csv_read_matrix(file_path, delim=" ", comment_str="#")
    error_msg = ("TUM trajectory files must have 8 entries per row "
                 "and no trailing delimiter at the end of the rows (space)")
    if not raw_mat or len(raw_mat[0]) != 8:
        raise FileInterfaceException(error_msg)
    try:
        mat = np.array(raw_mat).astype(float)
    except ValueError:
        raise FileInterfaceException(error_msg)
    stamps = mat[:, 0]  # n x 1
    xyz = mat[:, 1:4]  # n x 3
    # file stores quaternions as x/y/z/w -> roll w into the front column
    quat = np.roll(mat[:, 4:], 1, axis=1)  # n x 4
    if not hasattr(file_path, 'read'):  # if not file handle
        logger.debug("Loaded {} stamps and poses from: {}".format(
            len(stamps), file_path))
    return PoseTrajectory3D(xyz, quat, stamps)
def write_tum_trajectory_file(file_path, traj: PoseTrajectory3D,
                              confirm_overwrite: bool = False) -> None:
    """
    Writes a trajectory to a text file in TUM format.
    :param file_path: desired text file for trajectory (string or handle)
    :param traj: trajectory.PoseTrajectory3D
    :param confirm_overwrite: whether to require user interaction
           to overwrite existing files
    """
    if isinstance(file_path, str) and confirm_overwrite:
        if not user.check_and_confirm_overwrite(file_path):
            return
    if not isinstance(traj, PoseTrajectory3D):
        raise FileInterfaceException(
            "trajectory must be a PoseTrajectory3D object")
    # TUM format stores quaternions as x/y/z/w -> shift w to the back column.
    quat_xyzw = np.roll(traj.orientations_quat_wxyz, -1, axis=1)
    mat = np.column_stack((traj.timestamps, traj.positions_xyz, quat_xyzw))
    np.savetxt(file_path, mat, delimiter=" ")
    if isinstance(file_path, str):
        logger.info("Trajectory saved to: " + file_path)
def read_kitti_poses_file(file_path) -> PosePath3D:
    """
    Parses a pose file in KITTI format (first 3 rows of SE(3) matrix per line).
    :param file_path: the trajectory file path (or file handle)
    :return: trajectory.PosePath3D
    """
    raw_mat = csv_read_matrix(file_path, delim=" ", comment_str="#")
    error_msg = ("KITTI pose files must have 12 entries per row "
                 "and no trailing delimiter at the end of the rows (space)")
    if not raw_mat or len(raw_mat[0]) != 12:
        raise FileInterfaceException(error_msg)
    try:
        mat = np.array(raw_mat).astype(float)
    except ValueError:
        raise FileInterfaceException(error_msg)
    # append the constant last row [0, 0, 0, 1] to get full 4x4 SE(3) matrices
    poses = [
        np.vstack((row.reshape(3, 4), [0., 0., 0., 1.])) for row in mat
    ]
    if not hasattr(file_path, 'read'):  # if not file handle
        logger.debug("Loaded {} poses from: {}".format(len(poses), file_path))
    return PosePath3D(poses_se3=poses)
def write_kitti_poses_file(file_path, traj: PosePath3D,
                           confirm_overwrite: bool = False) -> None:
    """
    Writes poses to a text file in KITTI format.
    :param file_path: desired text file for trajectory (string or handle)
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D
    :param confirm_overwrite: whether to require user interaction
           to overwrite existing files
    """
    if isinstance(file_path, str) and confirm_overwrite:
        if not user.check_and_confirm_overwrite(file_path):
            return
    # flatten each 4x4 pose and drop the constant last row [0, 0, 0, 1]
    poses_flat = [pose.flatten()[:12] for pose in traj.poses_se3]
    np.savetxt(file_path, poses_flat, delimiter=' ')
    if isinstance(file_path, str):
        logger.info("Poses saved to: " + file_path)
def read_euroc_csv_trajectory(file_path) -> PoseTrajectory3D:
    """
    Parses a ground truth trajectory from an EuRoC MAV state estimate .csv.
    :param file_path: <sequence>/mav0/state_groundtruth_estimate0/data.csv
    :return: trajectory.PoseTrajectory3D object
    """
    raw_mat = csv_read_matrix(file_path, delim=",", comment_str="#")
    error_msg = (
        "EuRoC format ground truth must have at least 8 entries per row "
        "and no trailing delimiter at the end of the rows (comma)")
    if not raw_mat or len(raw_mat[0]) < 8:
        raise FileInterfaceException(error_msg)
    try:
        mat = np.array(raw_mat).astype(float)
    except ValueError:
        raise FileInterfaceException(error_msg)
    # timestamps are stored in nanoseconds -> convert to seconds
    stamps = np.divide(mat[:, 0], 1e9)  # n x 1
    xyz = mat[:, 1:4]  # n x 3
    quat = mat[:, 4:8]  # n x 4 (w, x, y, z)
    logger.debug("Loaded {} stamps and poses from: {}".format(
        len(stamps), file_path))
    return PoseTrajectory3D(xyz, quat, stamps)
def _get_xyz_quat_from_transform_stamped(
msg) -> typing.Tuple[typing.List[float], typing.List[float]]:
xyz = [
msg.transform.translation.x, msg.transform.translation.y,
msg.transform.translation.z
]
quat = [
msg.transform.rotation.w, msg.transform.rotation.x,
msg.transform.rotation.y, msg.transform.rotation.z
]
return xyz, quat
def _get_xyz_quat_from_pose_or_odometry_msg(
msg) -> typing.Tuple[typing.List[float], typing.List[float]]:
# Make nav_msgs/Odometry behave like geometry_msgs/PoseStamped.
while not hasattr(msg.pose, 'position') and not hasattr(
msg.pose, 'orientation'):
msg = msg.pose
xyz = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
quat = [
msg.pose.orientation.w, msg.pose.orientation.x, msg.pose.orientation.y,
msg.pose.orientation.z
]
return xyz, quat
def get_supported_topics(
        reader: typing.Union[Rosbag1Reader, Rosbag2Reader]) -> list:
    """
    :param reader: opened bag reader (rosbags.rosbag2 or rosbags.rosbag1)
    :return: list of ROS topics that are supported by this module
    """
    # keep one entry per connection (topics may repeat for multiple publishers)
    return sorted(connection.topic for connection in reader.connections
                  if connection.msgtype in SUPPORTED_ROS_MSGS)
def read_bag_trajectory(reader: typing.Union[Rosbag1Reader,
                                             Rosbag2Reader], topic: str,
                        cache_tf_tree: bool = False) -> PoseTrajectory3D:
    """
    Reads a trajectory from a ROS1 or ROS2 bag file.
    :param reader: opened bag reader (rosbags.rosbag2 or rosbags.rosbag1)
    :param topic: trajectory topic of supported message type,
                  or a TF trajectory ID (e.g.: '/tf:map.base_link' )
    :param cache_tf_tree: cache the tf tree. This speeds up the trajectory
           reading in case multiple TF trajectories are loaded from
           the same reader.
    :return: trajectory.PoseTrajectory3D
    :raises: FileInterfaceException if the reader, topic or message type
             is not supported
    """
    if not isinstance(reader, (Rosbag1Reader, Rosbag2Reader)):
        raise FileInterfaceException(
            "reader must be a rosbags.rosbags1.reader.Reader "
            "or rosbags.rosbags2.reader.Reader - "
            "rosbag.Bag() is not supported by evo anymore")
    # TODO: Support TF also with ROS2 bags.
    if tf_id.check_id(topic):
        if isinstance(reader, Rosbag1Reader):
            # Use TfCache instead if it's a TF transform ID.
            from evo.tools import tf_cache
            tf_tree_cache = (tf_cache.instance(reader.__hash__())
                             if cache_tf_tree else tf_cache.TfCache())
            return tf_tree_cache.get_trajectory(reader, identifier=topic)
        else:
            raise FileInterfaceException(
                "TF support for ROS2 bags is not implemented")
    if topic not in reader.topics:
        raise FileInterfaceException("no messages for topic '" + topic +
                                     "' in bag")
    msg_type = reader.topics[topic].msgtype
    if msg_type not in SUPPORTED_ROS_MSGS:
        raise FileInterfaceException(
            "unsupported message type: {}".format(msg_type))
    # Choose appropriate message conversion.
    if msg_type == "geometry_msgs/msg/TransformStamped":
        get_xyz_quat = _get_xyz_quat_from_transform_stamped
    else:
        get_xyz_quat = _get_xyz_quat_from_pose_or_odometry_msg
    stamps, xyz, quat = [], [], []
    frame_id = ""
    connections = [c for c in reader.connections if c.topic == topic]
    for connection, _, rawdata in reader.messages(
            connections=connections):  # type: ignore
        if isinstance(reader, Rosbag1Reader):
            msg = deserialize_cdr(ros1_to_cdr(rawdata, connection.msgtype),
                                  connection.msgtype)
        else:
            msg = deserialize_cdr(rawdata, connection.msgtype)
        if not stamps:
            # Remember the frame_id of the first message here instead of
            # deserializing the whole topic a second time after the loop.
            frame_id = msg.header.frame_id
        # Use the header timestamps (converted to seconds).
        # Note: msg/stamp is a rosbags type here, not native ROS.
        t = msg.header.stamp
        stamps.append(t.sec + (t.nanosec * 1e-9))
        xyz_t, quat_t = get_xyz_quat(msg)
        xyz.append(xyz_t)
        quat.append(quat_t)
    logger.debug("Loaded {} {} messages of topic: {}".format(
        len(stamps), msg_type, topic))
    return PoseTrajectory3D(np.array(xyz), np.array(quat), np.array(stamps),
                            meta={"frame_id": frame_id})
def write_bag_trajectory(writer, traj: PoseTrajectory3D, topic_name: str,
                         frame_id: str = "") -> None:
    """
    Write a trajectory as a geometry_msgs/PoseStamped topic into a bag.
    :param writer: opened bag writer (rosbags.rosbag2 or rosbags.rosbag1)
    :param traj: trajectory.PoseTrajectory3D
    :param topic_name: the desired topic name for the trajectory
    :param frame_id: optional ROS frame_id
    """
    # Local import: rosbags message types are only needed for writing.
    from rosbags.typesys.types import (
        geometry_msgs__msg__PoseStamped as PoseStamped, std_msgs__msg__Header
        as Header, geometry_msgs__msg__Pose as Pose, geometry_msgs__msg__Point
        as Position, geometry_msgs__msg__Quaternion as Quaternion,
        builtin_interfaces__msg__Time as Time)
    if not isinstance(traj, PoseTrajectory3D):
        raise FileInterfaceException(
            "trajectory must be a PoseTrajectory3D object")
    if not isinstance(writer, (Rosbag1Writer, Rosbag2Writer)):
        raise FileInterfaceException(
            "writer must be a rosbags.rosbags1.writer.Writer "
            "or rosbags.rosbags2.writer.Writer - "
            "rosbag.Bag() is not supported by evo anymore")
    msgtype = PoseStamped.__msgtype__
    connection = writer.add_connection(topic_name, msgtype)
    for stamp, xyz, quat in zip(traj.timestamps, traj.positions_xyz,
                                traj.orientations_quat_wxyz):
        # Split the float timestamp (seconds) into sec/nanosec integer parts.
        sec = int(stamp // 1)
        nanosec = int((stamp - sec) * 1e9)
        time = Time(sec, nanosec)
        header = Header(time, frame_id)
        position = Position(x=xyz[0], y=xyz[1], z=xyz[2])
        # evo stores quaternions in w-first order.
        quaternion = Quaternion(w=quat[0], x=quat[1], y=quat[2], z=quat[3])
        pose = Pose(position, quaternion)
        p = PoseStamped(header, pose)
        serialized_msg = serialize_cdr(p, msgtype)
        if isinstance(writer, Rosbag1Writer):
            # ROS1 bags need the CDR data converted back to the ROS1 format.
            serialized_msg = cdr_to_ros1(serialized_msg, msgtype)
        # Write with the timestamp in integer nanoseconds.
        writer.write(connection, int(stamp * 1e9), serialized_msg)
    logger.info("Saved geometry_msgs/PoseStamped topic: " + topic_name)
def save_res_file(zip_path, result_obj: result.Result,
                  confirm_overwrite: bool = False) -> None:
    """
    save results to a zip file that can be deserialized with load_res_file()
    :param zip_path: path to zip file (or file handle)
    :param result_obj: evo.core.result.Result instance
    :param confirm_overwrite: whether to require user interaction
           to overwrite existing files
    """
    if isinstance(zip_path, str):
        logger.debug("Saving results to " + zip_path + "...")
    if confirm_overwrite and not user.check_and_confirm_overwrite(zip_path):
        return
    with zipfile.ZipFile(zip_path, 'w') as archive:
        # Metadata and statistics are stored as JSON.
        archive.writestr("info.json", json.dumps(result_obj.info))
        archive.writestr("stats.json", json.dumps(result_obj.stats))
        # Numpy arrays are stored in .npy format.
        for name, array in result_obj.np_arrays.items():
            with io.BytesIO() as array_buffer:
                np.save(array_buffer, array)
                archive.writestr("{}.npy".format(name),
                                 array_buffer.getvalue())
        # Trajectories are stored in TUM format, plain paths in KITTI format.
        for name, traj in result_obj.trajectories.items():
            with io.StringIO() as traj_buffer:
                if isinstance(traj, PoseTrajectory3D):
                    fmt_suffix = ".tum"
                    write_tum_trajectory_file(traj_buffer, traj)
                elif isinstance(traj, PosePath3D):
                    fmt_suffix = ".kitti"
                    write_kitti_poses_file(traj_buffer, traj)
                else:
                    raise FileInterfaceException(
                        "unknown format of trajectory {}".format(name))
                archive.writestr("{}{}".format(name, fmt_suffix),
                                 traj_buffer.getvalue().encode("utf-8"))
def load_res_file(zip_path, load_trajectories: bool = False) -> result.Result:
    """
    load contents of a result .zip file saved with save_res_file(...)
    :param zip_path: path to zip file
    :param load_trajectories: set to True to load also the (backup) trajectories
    :return: evo.core.result.Result instance
    :raises FileInterfaceException: if the mandatory JSON files are missing
    """
    logger.debug("Loading result from {} ...".format(zip_path))
    result_obj = result.Result()
    with zipfile.ZipFile(zip_path, mode='r') as archive:
        file_list = archive.namelist()
        # info.json and stats.json are mandatory in a valid result archive.
        if not {"info.json", "stats.json"} <= set(file_list):
            raise FileInterfaceException(
                "{} is not a valid result file".format(zip_path))
        result_obj.info = json.loads(archive.read("info.json").decode("utf-8"))
        result_obj.stats = json.loads(
            archive.read("stats.json").decode("utf-8"))
        # Compatibility: previous evo versions wrote .npz, although it was .npy
        # In any case, np.load() supports both file formats.
        np_files = [f for f in file_list if f.endswith((".npy", ".npz"))]
        for filename in np_files:
            with io.BytesIO(archive.read(filename)) as array_buffer:
                array = np.load(array_buffer)
                # Array name = filename without directory and extension.
                name = os.path.splitext(os.path.basename(filename))[0]
                result_obj.add_np_array(name, array)
        if load_trajectories:
            # Trajectories were saved in TUM format, plain paths in KITTI.
            tum_files = [f for f in file_list if f.endswith(".tum")]
            for filename in tum_files:
                with io.TextIOWrapper(archive.open(filename,
                                                   mode='r')) as traj_buffer:
                    traj = read_tum_trajectory_file(traj_buffer)
                    name = os.path.splitext(os.path.basename(filename))[0]
                    result_obj.add_trajectory(name, traj)
            kitti_files = [f for f in file_list if f.endswith(".kitti")]
            for filename in kitti_files:
                with io.TextIOWrapper(archive.open(filename,
                                                   mode='r')) as path_buffer:
                    path = read_kitti_poses_file(path_buffer)
                    name = os.path.splitext(os.path.basename(filename))[0]
                    result_obj.add_trajectory(name, path)
    return result_obj
def load_transform_json(json_path) -> np.ndarray:
    """
    load a transformation stored in xyz + quaternion format in a .json file
    :param json_path: path to the .json file
    :return: t (SE(3) matrix)
    """
    with open(json_path, 'r') as tf_file:
        data = json.load(tf_file)
    keys = ("x", "y", "z", "qx", "qy", "qz", "qw")
    if any(key not in data for key in keys):
        raise FileInterfaceException(
            "invalid transform file - expected keys " + str(keys))
    xyz = np.array([data[key] for key in ("x", "y", "z")])
    # evo uses w-first quaternion order.
    quat = np.array([data[key] for key in ("qw", "qx", "qy", "qz")])
    return lie.se3(lie.so3_from_se3(tr.quaternion_matrix(quat)), xyz)
| 19,748 | 40.929936 | 96 | py |
evo | evo-master/evo/tools/log.py | # -*- coding: UTF8 -*-
"""
utilities for the configuration of the package's loggers
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import sys
import typing
import colorama
from colorama import Fore
from evo.tools.settings import SETTINGS, GLOBAL_LOGFILE_PATH
colorama.init()
CONSOLE_ERROR_FMT = "{}[%(levelname)s]{} %(message)s".format(
Fore.LIGHTRED_EX, Fore.RESET)
CONSOLE_WARN_FMT = "{}[%(levelname)s]{} %(message)s".format(
Fore.LIGHTYELLOW_EX, Fore.RESET)
DEFAULT_LONG_FMT = "[%(levelname)s][%(asctime)s][%(module)s.%(funcName)s():%(lineno)s]\n%(message)s"
class ConsoleFormatter(logging.Formatter):
    """
    Logging formatter that switches the format string depending on the
    record's log level, e.g. to colorize warnings/errors on the console.
    """
    def __init__(self, fmt="%(msg)s"):
        # fmt is used for the INFO and DEBUG levels; warnings and errors
        # always use the colored module-level format strings.
        super(ConsoleFormatter, self).__init__(fmt)
        self.critical_fmt = CONSOLE_ERROR_FMT
        self.error_fmt = CONSOLE_ERROR_FMT
        self.warning_fmt = CONSOLE_WARN_FMT
        self.info_fmt = fmt
        self.debug_fmt = fmt
    def format(self, record):
        # Select the format string matching the record's level.
        if record.levelno == logging.CRITICAL:
            self._fmt = self.error_fmt
        elif record.levelno == logging.ERROR:
            self._fmt = self.error_fmt
        elif record.levelno == logging.WARNING:
            self._fmt = self.warning_fmt
        elif record.levelno == logging.INFO:
            self._fmt = self.info_fmt
        elif record.levelno == logging.DEBUG:
            self._fmt = self.debug_fmt
        # NOTE: pokes the private style object of logging.Formatter so that
        # the switched format string is actually applied.
        self._style._fmt = self._fmt
        result = logging.Formatter.format(self, record)
        return result
# configures the package's root logger (see __init__.py)
def configure_logging(verbose: bool = False, silent: bool = False,
                      debug: bool = False,
                      console_fmt: typing.Optional[str] = None,
                      file_fmt: str = DEFAULT_LONG_FMT,
                      local_logfile: typing.Optional[str] = None) -> None:
    """
    Configure the package's root logger ('evo', see __init__.py).
    :param verbose: console log level DEBUG
    :param silent: console log level WARNING (ignored if verbose/debug)
    :param debug: console log level DEBUG plus verbose format & system info
    :param console_fmt: optional custom format string for the console handler
    :param file_fmt: format string for the file handler(s)
    :param local_logfile: optional additional logfile path
    """
    logger = logging.getLogger("evo")
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    # Remove ALL previously attached handlers (package default handler or
    # handlers from a previous call). Removing only the first one would
    # accumulate duplicate handlers and duplicate log output when this
    # function is called more than once.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logfiles = []
    if SETTINGS.global_logfile_enabled:
        logfiles.append(GLOBAL_LOGFILE_PATH)
    if local_logfile is not None:
        logfiles.append(local_logfile)
    for logfile in logfiles:
        file_handler = logging.FileHandler(logfile)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(file_fmt))
        logger.addHandler(file_handler)
    # Console verbosity: debug/verbose win over silent.
    if debug or verbose:
        console_level = logging.DEBUG
    elif silent:
        console_level = logging.WARNING
    else:
        console_level = logging.INFO
    if debug:
        console_fmt = DEFAULT_LONG_FMT
    elif console_fmt is None:
        console_fmt = SETTINGS.console_logging_format
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setLevel(console_level)
    console_handler.setFormatter(ConsoleFormatter(console_fmt))
    logger.addHandler(console_handler)
    # log header for debug mode
    if debug:
        import getpass as gp
        import platform as pf
        logger.debug(
            "System info:\nPython {pyversion}\n{platform}\n{user}\n".format(
                pyversion=pf.python_version(), platform=pf.platform(),
                user=gp.getuser() + "@" + pf.node()))
| 3,916 | 32.767241 | 100 | py |
evo | evo-master/evo/tools/pandas_bridge.py | # -*- coding: UTF8 -*-
"""
translate between evo and Pandas types
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import typing
import numpy as np
import pandas as pd
from evo.core import trajectory, result
from evo.tools import file_interface, user
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
def trajectory_to_df(traj: trajectory.PosePath3D) -> pd.DataFrame:
    """
    Convert a trajectory's poses into a dataframe with columns
    x, y, z, qw, qx, qy, qz, indexed by timestamp (if available)
    or by pose number.
    """
    if not isinstance(traj, trajectory.PosePath3D):
        raise TypeError("trajectory.PosePath3D or derived required")
    position_columns = ("x", "y", "z")
    quaternion_columns = ("qw", "qx", "qy", "qz")
    poses_dict = {
        column: traj.positions_xyz[:, i]
        for i, column in enumerate(position_columns)
    }
    poses_dict.update({
        column: traj.orientations_quat_wxyz[:, i]
        for i, column in enumerate(quaternion_columns)
    })
    if isinstance(traj, trajectory.PoseTrajectory3D):
        index = traj.timestamps
    else:
        index = np.arange(0, traj.num_poses)
    return pd.DataFrame(data=poses_dict, index=index)
def trajectory_stats_to_df(traj: trajectory.PosePath3D,
                           name: typing.Optional[str] = None) -> pd.DataFrame:
    """
    Collect the scalar infos and statistics of a trajectory
    in a single-row dataframe, labeled by name (or '0').
    """
    if not isinstance(traj, trajectory.PosePath3D):
        raise TypeError("trajectory.PosePath3D or derived required")
    # Only scalar infos can be put into a single dataframe row.
    data_dict = {
        key: value
        for key, value in traj.get_infos().items() if np.isscalar(value)
    }
    data_dict.update(traj.get_statistics())
    return pd.DataFrame(data=data_dict, index=[name] if name else ['0'])
def trajectories_stats_to_df(
        trajectories: typing.Dict[str, trajectory.PosePath3D]) -> pd.DataFrame:
    """
    Collect the statistics of multiple trajectories in a single dataframe,
    one row per trajectory.
    :param trajectories: dict mapping labels to trajectories
    """
    if not trajectories:
        return pd.DataFrame()
    # Concatenate once instead of growing the dataframe inside the loop,
    # which would copy all previous rows in every iteration.
    return pd.concat([
        trajectory_stats_to_df(traj, name)
        for name, traj in trajectories.items()
    ])
def result_to_df(result_obj: result.Result,
                 label: typing.Optional[str] = None) -> pd.DataFrame:
    """
    Convert a result object into a single-column dataframe with a
    hierarchical (MultiIndex) row index over info/stats/np_arrays.
    :param result_obj: evo.core.result.Result instance
    :param label: column label; derived from the result's 'est_name' info
                  entry if not given
    """
    if not isinstance(result_obj, result.Result):
        raise TypeError("result.Result or derived required")
    data = {
        "info": result_obj.info,
        "stats": result_obj.stats,
        "np_arrays": {},
        "trajectories": {}
    }
    for name, array in result_obj.np_arrays.items():
        data["np_arrays"][name] = array
    if label is None and "est_name" in data["info"]:
        label = os.path.basename(data["info"]["est_name"])
    elif label is None:
        label = "unnamed_result"
    df = pd.DataFrame(data=data)
    if df.empty:
        raise ValueError("cannot create a dataframe from an empty result")
    # Transpose & stack to get a MultiIndex (category, key) over one column.
    return df.T.stack().to_frame(name=label)
def save_df_as_table(df: pd.DataFrame, path: str,
                     format_str: str = SETTINGS.table_export_format,
                     transpose: str = SETTINGS.table_export_transpose,
                     confirm_overwrite: bool = False) -> None:
    """
    Export a dataframe to a file using one of pandas' to_* exporters.
    :param df: the dataframe to save
    :param path: output file path
    :param format_str: export format, i.e. the suffix of pandas' to_* method
    :param transpose: whether to transpose the table before export
    :param confirm_overwrite: whether to require user interaction
           to overwrite existing files
    """
    if confirm_overwrite and not user.check_and_confirm_overwrite(path):
        return
    table = df.T if transpose else df
    if format_str == "excel":
        # requires xlwt and/or openpyxl to be installed
        with pd.ExcelWriter(path) as writer:
            table.to_excel(writer)
    else:
        # Dispatch dynamically to the matching pandas exporter, e.g. to_csv.
        export = getattr(table, "to_" + format_str)
        export(path)
    logger.debug("{} table saved to: {}".format(format_str, path))
def load_results_as_dataframe(result_files: typing.Iterable[str],
                              use_filenames: bool = False,
                              merge: bool = False) -> pd.DataFrame:
    """
    Load multiple result files into a MultiIndex dataframe.
    :param result_files: result files to load
    :param use_filenames: use the result filename as label instead of
                          the 'est_name' label from the result's info
    :param merge: merge all results into an average result
    """
    if merge:
        results = [file_interface.load_res_file(f) for f in result_files]
        return result_to_df(result.merge_results(results))
    # Collect the single-column dataframes first and concatenate once -
    # concatenating inside the loop copies all previous columns each time.
    columns = []
    for result_file in result_files:
        result_obj = file_interface.load_res_file(result_file)
        name = result_file if use_filenames else None
        columns.append(result_to_df(result_obj, name))
    if not columns:
        return pd.DataFrame()
    return pd.concat(columns, axis="columns")
| 4,826 | 35.568182 | 79 | py |
evo | evo-master/evo/tools/plot.py | # -*- coding: UTF8 -*-
"""
some plotting functionality for different tasks
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import os
import collections
import collections.abc
import logging
import pickle
import typing
from enum import Enum
import matplotlib as mpl
from evo.tools.settings import SETTINGS
mpl.use(SETTINGS.plot_backend)
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.art3d as art3d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.collections import LineCollection
from matplotlib.transforms import Affine2D, Bbox
import numpy as np
import seaborn as sns
from evo import EvoException
from evo.tools import user
from evo.core import trajectory
# configure matplotlib and seaborn according to package settings
# TODO: 'color_codes=False' to work around this bug:
# https://github.com/mwaskom/seaborn/issues/1546
sns.set(style=SETTINGS.plot_seaborn_style, font=SETTINGS.plot_fontfamily,
font_scale=SETTINGS.plot_fontscale, color_codes=False,
palette=SETTINGS.plot_seaborn_palette)
rc = {
"lines.linewidth": SETTINGS.plot_linewidth,
"text.usetex": SETTINGS.plot_usetex,
"font.family": SETTINGS.plot_fontfamily,
"pgf.texsystem": SETTINGS.plot_texsystem
}
mpl.rcParams.update(rc)
logger = logging.getLogger(__name__)
ListOrArray = typing.Union[typing.Sequence[float], np.ndarray]
class PlotException(EvoException):
    """Raised when plotting fails due to invalid arguments or data."""
    pass
class PlotMode(Enum):
    """
    Coordinate axes to plot: a two-letter value selects a 2D projection
    (first letter on the plot's x axis), "xyz" selects a 3D plot.
    """
    xy = "xy"
    xz = "xz"
    yx = "yx"
    yz = "yz"
    zx = "zx"
    zy = "zy"
    xyz = "xyz"
class Viewport(Enum):
    """Strategies for handling the plot viewport."""
    update = "update"
    keep_unchanged = "keep_unchanged"
    zoom_to_map = "zoom_to_map"
class PlotCollection:
    """
    Container for multiple named figures that can be shown together
    (in tabbed Qt5/Tk windows or separate windows), exported to files,
    or (de)serialized with pickle.
    """
    def __init__(self, title: str = "",
                 deserialize: typing.Optional[str] = None):
        self.title = " ".join(title.splitlines())  # one line title
        self.figures = collections.OrderedDict()  # remember placement order
        # hack to avoid premature garbage collection when serializing with Qt
        # initialized later in tabbed_{qt, tk}_window
        self.root_window: typing.Optional[typing.Any] = None
        if deserialize is not None:
            logger.debug("Deserializing PlotCollection from " + deserialize +
                         "...")
            self.figures = pickle.load(open(deserialize, 'rb'))
    def __str__(self) -> str:
        return self.title + " (" + str(len(self.figures)) + " figure(s))"
    def add_figure(self, name: str, fig: plt.Figure) -> None:
        """Add a figure under the given tab/file name."""
        fig.tight_layout()
        self.figures[name] = fig
    @staticmethod
    def _bind_mouse_events_to_canvas(axes: Axes3D, canvas: FigureCanvasBase):
        """Enable mouse interaction (rotate/zoom) for a 3D axes object."""
        axes.mouse_init()
        # Event binding was possible through mouse_init() up to matplotlib 3.2.
        # In 3.3.0 this was moved, so we are forced to do it here.
        if mpl.__version__ >= "3.3.0":
            canvas.mpl_connect("button_press_event", axes._button_press)
            canvas.mpl_connect("button_release_event", axes._button_release)
            canvas.mpl_connect("motion_notify_event", axes._on_move)
    def tabbed_qt5_window(self) -> None:
        """Show all figures as tabs of a single PyQt5 window."""
        from PyQt5 import QtGui, QtWidgets
        from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg,
                                                        NavigationToolbar2QT)
        # mpl backend can already create instance
        # https://stackoverflow.com/a/40031190
        app = QtGui.QGuiApplication.instance()
        if app is None:
            app = QtWidgets.QApplication([self.title])
        self.root_window = QtWidgets.QTabWidget()
        self.root_window.setWindowTitle(self.title)
        sizes = [(0, 0)]
        for name, fig in self.figures.items():
            tab = QtWidgets.QWidget(self.root_window)
            tab.canvas = FigureCanvasQTAgg(fig)
            vbox = QtWidgets.QVBoxLayout(tab)
            vbox.addWidget(tab.canvas)
            toolbar = NavigationToolbar2QT(tab.canvas, tab)
            vbox.addWidget(toolbar)
            tab.setLayout(vbox)
            for axes in fig.get_axes():
                if isinstance(axes, Axes3D):
                    # must explicitly allow mouse dragging for 3D plots
                    self._bind_mouse_events_to_canvas(axes, tab.canvas)
            self.root_window.addTab(tab, name)
            sizes.append(tab.canvas.get_width_height())
        # Resize window to avoid clipped axes.
        self.root_window.resize(*max(sizes))
        self.root_window.show()
        app.exec_()
    def tabbed_tk_window(self) -> None:
        """Show all figures as tabs of a single Tkinter window."""
        from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
                                                       NavigationToolbar2Tk)
        import tkinter
        from tkinter import ttk
        self.root_window = tkinter.Tk()
        self.root_window.title(self.title)
        # quit if the window is deleted
        self.root_window.protocol("WM_DELETE_WINDOW", self.root_window.quit)
        nb = ttk.Notebook(self.root_window)
        nb.grid(row=1, column=0, sticky='NESW')
        for name, fig in self.figures.items():
            fig.tight_layout()
            tab = ttk.Frame(nb)
            canvas = FigureCanvasTkAgg(self.figures[name], master=tab)
            canvas.draw()
            canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH,
                                        expand=True)
            toolbar = NavigationToolbar2Tk(canvas, tab)
            toolbar.update()
            canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH,
                                  expand=True)
            for axes in fig.get_axes():
                if isinstance(axes, Axes3D):
                    # must explicitly allow mouse dragging for 3D plots
                    self._bind_mouse_events_to_canvas(axes, canvas)
            nb.add(tab, text=name)
        nb.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=True)
        self.root_window.mainloop()
        self.root_window.destroy()
    def show(self) -> None:
        """
        Display the collection, either in a single tabbed window
        (depending on the configured backend) or as separate windows.
        """
        if len(self.figures.keys()) == 0:
            return
        if not SETTINGS.plot_split:
            if SETTINGS.plot_backend.lower() == "qt5agg":
                self.tabbed_qt5_window()
            elif SETTINGS.plot_backend.lower() == "tkagg":
                self.tabbed_tk_window()
            else:
                plt.show()
        else:
            plt.show()
    def close(self) -> None:
        """Close all figures of the collection."""
        for name, fig in self.figures.items():
            plt.close(fig)
    def serialize(self, dest: str, confirm_overwrite: bool = True) -> None:
        """Pickle the figures to a file, loadable via __init__(deserialize=)."""
        logger.debug("Serializing PlotCollection to " + dest + "...")
        if confirm_overwrite and not user.check_and_confirm_overwrite(dest):
            return
        else:
            pickle.dump(self.figures, open(dest, 'wb'))
    def export(self, file_path: str, confirm_overwrite: bool = True) -> None:
        """
        Export the figures to image files: either all pages of a single
        .pdf file, or one file per figure (suffixed with the figure name).
        """
        base, ext = os.path.splitext(file_path)
        if ext == ".pdf" and not SETTINGS.plot_split:
            if confirm_overwrite and not user.check_and_confirm_overwrite(
                    file_path):
                return
            import matplotlib.backends.backend_pdf
            pdf = matplotlib.backends.backend_pdf.PdfPages(file_path)
            for name, fig in self.figures.items():
                # fig.tight_layout()  # TODO
                pdf.savefig(fig)
            pdf.close()
            logger.info("Plots saved to " + file_path)
        else:
            for name, fig in self.figures.items():
                dest = base + '_' + name + ext
                if confirm_overwrite and not user.check_and_confirm_overwrite(
                        dest):
                    return
                fig.tight_layout()
                fig.savefig(dest)
                logger.info("Plot saved to " + dest)
def set_aspect_equal(ax: plt.Axes) -> None:
    """
    Set an equal aspect ratio on a 2D or 3D axes object.
    kudos to https://stackoverflow.com/a/35126679
    :param ax: matplotlib 2D or 3D axes object
    """
    if not isinstance(ax, Axes3D):
        ax.set_aspect("equal")
        return
    xlim = ax.get_xlim3d()
    ylim = ax.get_ylim3d()
    zlim = ax.get_zlim3d()
    # Use the top-level numpy import instead of re-importing 'mean' locally.
    xmean = np.mean(xlim)
    ymean = np.mean(ylim)
    zmean = np.mean(zlim)
    # Half edge length of a cube that covers all current axis limits.
    plot_radius = max([
        abs(lim - mean_)
        for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))
        for lim in lims
    ])
    ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
    ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
    ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
def prepare_axis(fig: plt.Figure, plot_mode: PlotMode = PlotMode.xy,
                 subplot_arg: int = 111) -> plt.Axes:
    """
    prepares an axis according to the plot mode (for trajectory plotting)
    :param fig: matplotlib figure object
    :param plot_mode: PlotMode
    :param subplot_arg: optional if using subplots - the subplot id (e.g. '122')
    :return: the matplotlib axis
    """
    if plot_mode == PlotMode.xyz:
        ax = fig.add_subplot(subplot_arg, projection="3d")
    else:
        ax = fig.add_subplot(subplot_arg)
    # The first letter of the plot mode goes to the plot's x axis,
    # the second one to the y axis.
    if plot_mode in {PlotMode.xy, PlotMode.xz, PlotMode.xyz}:
        xlabel = "$x$ (m)"
    elif plot_mode in {PlotMode.yz, PlotMode.yx}:
        xlabel = "$y$ (m)"
    else:
        xlabel = "$z$ (m)"
    if plot_mode in {PlotMode.xy, PlotMode.zy, PlotMode.xyz}:
        ylabel = "$y$ (m)"
    elif plot_mode in {PlotMode.zx, PlotMode.yx}:
        ylabel = "$x$ (m)"
    else:
        ylabel = "$z$ (m)"
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if plot_mode == PlotMode.xyz:
        ax.set_zlabel('$z$ (m)')
    # Invert the created axis itself - plt.gca() could refer to a different
    # axes object if fig is not the current pyplot figure.
    if SETTINGS.plot_invert_xaxis:
        ax.invert_xaxis()
    if SETTINGS.plot_invert_yaxis:
        ax.invert_yaxis()
    if not SETTINGS.plot_show_axis:
        ax.set_axis_off()
    return ax
def plot_mode_to_idx(
        plot_mode: PlotMode) -> typing.Tuple[int, int, typing.Optional[int]]:
    """
    Map a PlotMode to the xyz column indices of the plotted coordinates.
    :return: index for the plot's x axis data, index for the y axis data,
             and 2 as z index if plot_mode is xyz (else None)
    """
    axis_to_column = {"x": 0, "y": 1, "z": 2}
    if plot_mode == PlotMode.xyz:
        x_idx, y_idx = 0, 1
    else:
        # The enum value spells out the plotted axes, e.g. "zy".
        x_idx = axis_to_column[plot_mode.value[0]]
        y_idx = axis_to_column[plot_mode.value[1]]
    z_idx = 2 if plot_mode == PlotMode.xyz else None
    return x_idx, y_idx, z_idx
def add_start_end_markers(ax: plt.Axes, plot_mode: PlotMode,
                          traj: trajectory.PosePath3D, start_symbol: str = "o",
                          start_color: str = "black", end_symbol: str = "x",
                          end_color: str = "black", alpha: float = 1.0,
                          traj_name: typing.Optional[str] = None):
    """
    Scatter a marker symbol at the first and the last position
    of a trajectory. Does nothing for an empty trajectory.
    """
    if traj.num_poses == 0:
        return
    x_idx, y_idx, z_idx = plot_mode_to_idx(plot_mode)
    indices = [x_idx, y_idx]
    if plot_mode == PlotMode.xyz:
        indices.append(z_idx)
    start_coords = [traj.positions_xyz[0][i] for i in indices]
    end_coords = [traj.positions_xyz[-1][i] for i in indices]
    start_label = f"Start of {traj_name}" if traj_name else None
    end_label = f"End of {traj_name}" if traj_name else None
    ax.scatter(*start_coords, marker=start_symbol, color=start_color,
               alpha=alpha, label=start_label)
    ax.scatter(*end_coords, marker=end_symbol, color=end_color, alpha=alpha,
               label=end_label)
def traj(ax: plt.Axes, plot_mode: PlotMode, traj: trajectory.PosePath3D,
         style: str = '-', color: str = 'black', label: str = "",
         alpha: float = 1.0, plot_start_end_markers: bool = False) -> None:
    """
    plot a path/trajectory based on xyz coordinates into an axis
    :param ax: the matplotlib axis
    :param plot_mode: PlotMode
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param style: matplotlib line style
    :param color: matplotlib color
    :param label: label (for legend)
    :param alpha: alpha value for transparency
    :param plot_start_end_markers: Mark the start and end of a trajectory
                                   with a symbol.
    """
    x_idx, y_idx, z_idx = plot_mode_to_idx(plot_mode)
    coordinates = [traj.positions_xyz[:, x_idx], traj.positions_xyz[:, y_idx]]
    if plot_mode == PlotMode.xyz:
        coordinates.append(traj.positions_xyz[:, z_idx])
    ax.plot(*coordinates, style, color=color, label=label, alpha=alpha)
    if SETTINGS.plot_xyz_realistic:
        set_aspect_equal(ax)
    if label and SETTINGS.plot_show_legend:
        ax.legend(frameon=True)
    if plot_start_end_markers:
        add_start_end_markers(ax, plot_mode, traj, start_color=color,
                              end_color=color, alpha=alpha)
def colored_line_collection(
        xyz: np.ndarray, colors: ListOrArray, plot_mode: PlotMode = PlotMode.xy,
        linestyles: str = "solid", step: int = 1, alpha: float = 1.
) -> typing.Union[LineCollection, art3d.LineCollection]:
    """
    Build a (2D or 3D, depending on plot_mode) line collection of the
    consecutive position segments, one color per segment.
    :param xyz: Nx3 position array
    :param colors: one color per plotted segment
    :param plot_mode: PlotMode
    :param linestyles: matplotlib line style
    :param step: stride between the connected positions,
                 e.g. 2 to connect only each position pair (0-1, 2-3, ...)
    :param alpha: alpha value for transparency
    :raises PlotException: if the number of colors doesn't match the segments
    """
    if step > 1 and len(xyz) / step != len(colors):
        raise PlotException(
            "color values don't have correct length: %d vs. %d" %
            (len(xyz) / step, len(colors)))
    x_idx, y_idx, z_idx = plot_mode_to_idx(plot_mode)
    # Start/end value pairs of each line segment, per plotted dimension.
    xs = [[x_1, x_2]
          for x_1, x_2 in zip(xyz[:-1:step, x_idx], xyz[1::step, x_idx])]
    ys = [[x_1, x_2]
          for x_1, x_2 in zip(xyz[:-1:step, y_idx], xyz[1::step, y_idx])]
    if plot_mode == PlotMode.xyz:
        zs = [[x_1, x_2]
              for x_1, x_2 in zip(xyz[:-1:step, z_idx], xyz[1::step, z_idx])]
        segs_3d = [list(zip(x, y, z)) for x, y, z in zip(xs, ys, zs)]
        line_collection = art3d.Line3DCollection(segs_3d, colors=colors,
                                                 alpha=alpha,
                                                 linestyles=linestyles)
    else:
        segs_2d = [list(zip(x, y)) for x, y in zip(xs, ys)]
        line_collection = LineCollection(segs_2d, colors=colors, alpha=alpha,
                                         linestyle=linestyles)
    return line_collection
def traj_colormap(ax: plt.Axes, traj: trajectory.PosePath3D,
                  array: ListOrArray, plot_mode: PlotMode, min_map: float,
                  max_map: float, title: str = "",
                  fig: typing.Optional[mpl.figure.Figure] = None,
                  plot_start_end_markers: bool = False) -> None:
    """
    color map a path/trajectory in xyz coordinates according to
    an array of values
    :param ax: plot axis
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param array: Nx1 array of values used for color mapping
    :param plot_mode: PlotMode
    :param min_map: lower bound value for color mapping
    :param max_map: upper bound value for color mapping
    :param title: plot title
    :param fig: plot figure. Obtained with plt.gcf() if none is specified
    :param plot_start_end_markers: Mark the start and end of a trajectory
                                   with a symbol.
    """
    pos = traj.positions_xyz
    # Map array values to colors, clipped to the [min_map, max_map] range.
    norm = mpl.colors.Normalize(vmin=min_map, vmax=max_map, clip=True)
    mapper = cm.ScalarMappable(
        norm=norm,
        cmap=SETTINGS.plot_trajectory_cmap)  # cm.*_r is reversed cmap
    mapper.set_array(array)
    colors = [mapper.to_rgba(a) for a in array]
    line_collection = colored_line_collection(pos, colors, plot_mode)
    ax.add_collection(line_collection)
    ax.autoscale_view(True, True, True)
    if plot_mode == PlotMode.xyz:
        # add_collection doesn't autoscale z - set the z limits manually.
        ax.set_zlim(np.amin(traj.positions_xyz[:, 2]),
                    np.amax(traj.positions_xyz[:, 2]))
        if SETTINGS.plot_xyz_realistic:
            set_aspect_equal(ax)
    if fig is None:
        fig = plt.gcf()
    # Colorbar with ticks at min, mid and max of the mapped range.
    cbar = fig.colorbar(
        mapper, ticks=[min_map, (max_map - (max_map - min_map) / 2), max_map],
        ax=ax)
    cbar.ax.set_yticklabels([
        "{0:0.3f}".format(min_map),
        "{0:0.3f}".format(max_map - (max_map - min_map) / 2),
        "{0:0.3f}".format(max_map)
    ])
    if title:
        ax.legend(frameon=True)
        ax.set_title(title)
    if plot_start_end_markers:
        add_start_end_markers(ax, plot_mode, traj, start_color=colors[0],
                              end_color=colors[-1])
def draw_coordinate_axes(ax: plt.Axes, traj: trajectory.PosePath3D,
                         plot_mode: PlotMode, marker_scale: float = 0.1,
                         x_color: str = "r", y_color: str = "g",
                         z_color: str = "b") -> None:
    """
    Draws a coordinate frame axis for each pose of a trajectory.
    :param ax: plot axis
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param plot_mode: PlotMode value
    :param marker_scale: affects the size of the marker (1. * marker_scale)
    :param x_color: color of the x-axis
    :param y_color: color of the y-axis
    :param z_color: color of the z-axis
    """
    if marker_scale <= 0:
        return
    # Unit vectors (homogeneous coordinates), scaled to the marker size.
    unit_x = np.array([1 * marker_scale, 0, 0, 1])
    unit_y = np.array([0, 1 * marker_scale, 0, 1])
    unit_z = np.array([0, 0, 1 * marker_scale, 1])
    # Transform start/end vertices of each axis to global frame.
    x_vertices = np.array([[p[:3, 3], p.dot(unit_x)[:3]]
                           for p in traj.poses_se3])
    y_vertices = np.array([[p[:3, 3], p.dot(unit_y)[:3]]
                           for p in traj.poses_se3])
    z_vertices = np.array([[p[:3, 3], p.dot(unit_z)[:3]]
                           for p in traj.poses_se3])
    n = traj.num_poses
    # Concatenate all line segment vertices in order x, y, z.
    vertices = np.concatenate((x_vertices, y_vertices, z_vertices)).reshape(
        (n * 2 * 3, 3))
    # Concatenate all colors per line segment in order x, y, z.
    colors = np.array(n * [x_color] + n * [y_color] + n * [z_color])
    # step=2 connects only each start/end vertex pair, not across segments.
    markers = colored_line_collection(vertices, colors, plot_mode, step=2)
    ax.add_collection(markers)
def draw_correspondence_edges(ax: plt.Axes, traj_1: trajectory.PosePath3D,
                              traj_2: trajectory.PosePath3D,
                              plot_mode: PlotMode, style: str = '-',
                              color: str = "black", alpha: float = 1.) -> None:
    """
    Draw edges between corresponding poses of two trajectories.
    Trajectories must be synced, i.e. having the same number of poses.
    :param ax: plot axis
    :param traj_{1,2}: trajectory.PosePath3D or trajectory.PoseTrajectory3D
    :param plot_mode: PlotMode value
    :param style: matplotlib line style
    :param color: matplotlib color
    :param alpha: alpha value for transparency
    """
    if traj_1.num_poses != traj_2.num_poses:
        raise PlotException(
            "trajectories must have same length to draw pose correspondences"
            " - try to synchronize them first")
    n = traj_1.num_poses
    # Interleave the positions pairwise: [p1_0, p2_0, p1_1, p2_1, ...]
    interweaved_positions = np.empty((n * 2, 3))
    interweaved_positions[::2] = traj_1.positions_xyz
    interweaved_positions[1::2] = traj_2.positions_xyz
    # step=2 connects only each correspondence pair, not across pairs.
    edges = colored_line_collection(interweaved_positions,
                                    np.array(n * [color]), plot_mode, step=2,
                                    alpha=alpha, linestyles=style)
    ax.add_collection(edges)
def traj_xyz(axarr: np.ndarray, traj: trajectory.PosePath3D, style: str = '-',
             color: str = 'black', label: str = "", alpha: float = 1.0,
             start_timestamp: typing.Optional[float] = None) -> None:
    """
    Plot the xyz coordinates of a path/trajectory into an axis array.
    :param axarr: an axis array (for x, y & z),
        e.g. from 'fig, axarr = plt.subplots(3)'
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param style: matplotlib line style
    :param color: matplotlib color
    :param label: label (for legend)
    :param alpha: alpha value for transparency
    :param start_timestamp: optional start time of the reference
        (for x-axis alignment)
    """
    if len(axarr) != 3:
        raise PlotException("expected an axis array with 3 subplots - got " +
                            str(len(axarr)))
    if isinstance(traj, trajectory.PoseTrajectory3D):
        # Stamped trajectory: plot against time, optionally shifted.
        x_data = (traj.timestamps -
                  start_timestamp) if start_timestamp else traj.timestamps
        x_label = "$t$ (s)"
    else:
        # Plain path: plot against the pose index.
        x_data = np.arange(0., len(traj.positions_xyz))
        x_label = "index"
    y_labels = ("$x$ (m)", "$y$ (m)", "$z$ (m)")
    for axis_index, y_label in enumerate(y_labels):
        axarr[axis_index].plot(x_data, traj.positions_xyz[:, axis_index],
                               style, color=color, label=label, alpha=alpha)
        axarr[axis_index].set_ylabel(y_label)
    axarr[2].set_xlabel(x_label)
    if label:
        axarr[0].legend(frameon=True)
def traj_rpy(axarr: np.ndarray, traj: trajectory.PosePath3D, style: str = '-',
             color: str = 'black', label: str = "", alpha: float = 1.0,
             start_timestamp: typing.Optional[float] = None) -> None:
    """
    Plot the Euler RPY angles of a path/trajectory into an axis array.
    :param axarr: an axis array (for R, P & Y),
        e.g. from 'fig, axarr = plt.subplots(3)'
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param style: matplotlib line style
    :param color: matplotlib color
    :param label: label (for legend)
    :param alpha: alpha value for transparency
    :param start_timestamp: optional start time of the reference
        (for x-axis alignment)
    """
    if len(axarr) != 3:
        raise PlotException("expected an axis array with 3 subplots - got " +
                            str(len(axarr)))
    angles = traj.get_orientations_euler(SETTINGS.euler_angle_sequence)
    if isinstance(traj, trajectory.PoseTrajectory3D):
        # Stamped trajectory: plot against time, optionally shifted.
        x_data = (traj.timestamps -
                  start_timestamp) if start_timestamp else traj.timestamps
        x_label = "$t$ (s)"
    else:
        # Plain path: plot against the pose index.
        x_data = np.arange(0., len(angles))
        x_label = "index"
    y_labels = ("$roll$ (deg)", "$pitch$ (deg)", "$yaw$ (deg)")
    for axis_index, y_label in enumerate(y_labels):
        # Angles are stored in radians, display in degrees.
        axarr[axis_index].plot(x_data, np.rad2deg(angles[:, axis_index]),
                               style, color=color, label=label, alpha=alpha)
        axarr[axis_index].set_ylabel(y_label)
    axarr[2].set_xlabel(x_label)
    if label:
        axarr[0].legend(frameon=True)
def trajectories(fig: plt.Figure, trajectories: typing.Union[
        trajectory.PosePath3D, typing.Sequence[trajectory.PosePath3D],
        typing.Dict[str, trajectory.PosePath3D]], plot_mode=PlotMode.xy,
                 title: str = "", subplot_arg: int = 111,
                 plot_start_end_markers: bool = False) -> None:
    """
    high-level function for plotting multiple trajectories
    :param fig: matplotlib figure
    :param trajectories: instance or container of PosePath3D or derived
    - if it's a dictionary, the keys (names) will be used as labels
    :param plot_mode: e.g. plot.PlotMode.xy
    :param title: optional plot title
    :param subplot_arg: optional matplotlib subplot ID if used as subplot
    :param plot_start_end_markers: Mark the start and end of a trajectory
                                   with a symbol.
    """
    ax = prepare_axis(fig, plot_mode, subplot_arg)
    if title:
        ax.set_title(title)
    cmap_colors = None
    if SETTINGS.plot_multi_cmap.lower() != "none" and isinstance(
            trajectories, collections.abc.Iterable):
        cmap = getattr(cm, SETTINGS.plot_multi_cmap)
        cmap_colors = iter(cmap(np.linspace(0, 1, len(trajectories))))

    # Helper that draws a single trajectory with the next color in the cycle.
    def draw(t, name=""):
        if cmap_colors is None:
            # Bug fix: next(ax._get_lines.prop_cycler)['color'] breaks with
            # matplotlib >= 3.8, where the prop_cycler attribute was removed.
            # get_next_color() is the supported way to advance the cycle.
            color = ax._get_lines.get_next_color()
        else:
            color = next(cmap_colors)
        if SETTINGS.plot_usetex:
            # Escape underscores, otherwise LaTeX renders them as subscripts.
            name = name.replace("_", "\\_")
        traj(ax, plot_mode, t, '-', color, name,
             plot_start_end_markers=plot_start_end_markers)

    if isinstance(trajectories, trajectory.PosePath3D):
        draw(trajectories)
    elif isinstance(trajectories, dict):
        for name, t in trajectories.items():
            draw(t, name)
    else:
        for t in trajectories:
            draw(t)
def error_array(ax: plt.Axes, err_array: ListOrArray,
                x_array: typing.Optional[ListOrArray] = None,
                statistics: typing.Optional[typing.Dict[str, float]] = None,
                threshold: typing.Optional[float] = None,
                cumulative: bool = False, color: str = 'grey',
                name: str = "error", title: str = "", xlabel: str = "index",
                ylabel: typing.Optional[str] = None, subplot_arg: int = 111,
                linestyle: str = "-", marker: typing.Optional[str] = None):
    """
    high-level function for plotting raw error values of a metric
    :param ax: matplotlib axes
    :param err_array: an nx1 array of values
    :param x_array: an nx1 array of x-axis values
    :param statistics: optional dictionary of
        {metrics.StatisticsType.value: value}
    :param threshold: optional value for horizontal threshold line
    :param cumulative: set to True for cumulative plot
    :param color: matplotlib color of the error line
    :param name: optional name of the value array
    :param title: optional plot title
    :param xlabel: optional x-axis label
    :param ylabel: optional y-axis label
    :param subplot_arg: optional matplotlib subplot ID if used as subplot
    :param linestyle: matplotlib linestyle
    :param marker: optional matplotlib marker style for points
    """
    # Collapse the original 4-way branching: the only differences were
    # cumulative vs. raw values and presence of explicit x values.
    values = np.cumsum(err_array) if cumulative else err_array
    if x_array is not None:
        ax.plot(x_array, values, linestyle=linestyle, marker=marker,
                color=color, label=name)
    else:
        ax.plot(values, linestyle=linestyle, marker=marker, color=color,
                label=name)
    if statistics is not None:
        for stat_name, value in statistics.items():
            # Bug fix: next(ax._get_lines.prop_cycler)['color'] breaks with
            # matplotlib >= 3.8, where prop_cycler was removed.
            # get_next_color() is the supported replacement.
            stat_color = ax._get_lines.get_next_color()
            if stat_name == "std" and "mean" in statistics:
                # Draw the std deviation as a band around the mean.
                mean, std = statistics["mean"], statistics["std"]
                ax.axhspan(mean - std / 2, mean + std / 2, color=stat_color,
                           alpha=0.5, label=stat_name)
            else:
                ax.axhline(y=value, color=stat_color, linewidth=2.0,
                           label=stat_name)
    if threshold is not None:
        ax.axhline(y=threshold, color='red', linestyle='dashed', linewidth=2.0,
                   label="threshold")
    # NOTE: these pyplot calls act on the current figure/axes, which is
    # assumed to contain `ax` - kept as-is to preserve existing behavior.
    plt.ylabel(ylabel if ylabel else name)
    plt.xlabel(xlabel)
    plt.title(title)
    plt.legend(frameon=True)
def ros_map(
        ax: plt.Axes, yaml_path: str, plot_mode: PlotMode,
        cmap: str = SETTINGS.ros_map_cmap,
        mask_unknown_value: typing.Optional[int] = (
            SETTINGS.ros_map_unknown_cell_value if SETTINGS.ros_map_enable_masking
            else None), alpha: float = SETTINGS.ros_map_alpha_value,
        viewport: Viewport = Viewport(SETTINGS.ros_map_viewport)
) -> None:
    """
    Inserts an image of an 2D ROS map into the plot axis.
    See: http://wiki.ros.org/map_server#Map_format
    :param ax: 2D matplotlib axes
    :param plot_mode: a 2D PlotMode
    :param yaml_path: yaml file that contains the metadata of the map image
    :param cmap: color map used to map scalar data to colors
                 (only for single channel image)
    :param mask_unknown_value: uint8 value that represents unknown cells.
                               If specified, these cells will be masked out.
                               If set to None or False, nothing will be masked.
    :param alpha: alpha value for blending the map image into the plot
    :param viewport: Viewport defining how the axis limits will be changed
    """
    import yaml
    if isinstance(ax, Axes3D):
        raise PlotException("ros_map can't be drawn into a 3D axis")
    if plot_mode in {PlotMode.xz, PlotMode.yz, PlotMode.zx, PlotMode.zy}:
        # Image lies in xy / yx plane, nothing to see here.
        return
    x_idx, y_idx, _ = plot_mode_to_idx(plot_mode)
    with open(yaml_path) as f:
        metadata = yaml.safe_load(f)
    # Load map image, mask unknown cells if desired.
    image_path = metadata["image"]
    if not os.path.isabs(image_path):
        # Relative image paths in the YAML are resolved w.r.t. the YAML file.
        image_path = os.path.join(os.path.dirname(yaml_path), image_path)
    image = plt.imread(image_path)
    if mask_unknown_value:
        # Support masking with single channel or RGB images, 8bit or normalized
        # float. For RGB all channels must be equal to mask_unknown_value.
        n_channels = image.shape[2] if len(image.shape) > 2 else 1
        if image.dtype == np.uint8:
            mask_unknown_value_rgb = np.array([mask_unknown_value] * 3,
                                              dtype=np.uint8)
        elif image.dtype == np.float32:
            mask_unknown_value_rgb = np.array([mask_unknown_value / 255.0] * 3,
                                              dtype=np.float32)
        # NOTE(review): if image.dtype is neither uint8 nor float32,
        # mask_unknown_value_rgb stays undefined and the branches below raise
        # NameError - presumably plt.imread only yields those dtypes; verify.
        if n_channels == 1:
            image = np.ma.masked_where(image == mask_unknown_value_rgb[0],
                                       image)
        elif n_channels == 3:
            # imshow ignores masked RGB regions for some reason,
            # add an alpha channel instead.
            # https://stackoverflow.com/questions/60561680
            mask = np.all(image == mask_unknown_value_rgb, 2)
            max_alpha = 255 if image.dtype == np.uint8 else 1.
            image = np.dstack((image, (~mask).astype(image.dtype) * max_alpha))
        else:
            # E.g. if there's already an alpha channel it doesn't make sense.
            logger.warning("masking unknown map cells is not supported "
                           "with {}-channel {} pixels".format(
                               n_channels, image.dtype))
    # Remember the pre-existing data limits to restore/merge them later.
    original_bbox = copy.deepcopy(ax.dataLim)
    # Squeeze extent to reflect metric coordinates.
    resolution = metadata["resolution"]
    n_rows, n_cols = image.shape[x_idx], image.shape[y_idx]
    metric_width = n_cols * resolution
    metric_height = n_rows * resolution
    extent = [0, metric_width, 0, metric_height]
    if plot_mode == PlotMode.yx:
        # Swap the image axes to match the swapped plot axes.
        image = np.rot90(image)
        image = np.fliplr(image)
    ax_image = ax.imshow(image, origin="upper", cmap=cmap, extent=extent,
                         zorder=1, alpha=alpha)
    # Transform map frame to plot axis origin.
    map_to_pixel_origin = Affine2D()
    map_to_pixel_origin.translate(metadata["origin"][x_idx],
                                  metadata["origin"][y_idx])
    angle = metadata["origin"][2]
    if plot_mode == PlotMode.yx:
        # Rotation axis (z) points downwards.
        angle *= -1
    map_to_pixel_origin.rotate(angle)
    ax_image.set_transform(map_to_pixel_origin + ax.transData)
    if viewport in (viewport.update, viewport.zoom_to_map):
        # Bounding box of the map in plot coordinates.
        bbox = map_to_pixel_origin.transform_bbox(
            Bbox(np.array([[0, 0], [metric_width, metric_height]])))
        if viewport == viewport.update:
            # Data limits aren't updated properly after the transformation by
            # ax.relim() / ax.autoscale_view(), so we have to do it manually...
            # Not ideal, but it allows to avoid a clipping viewport.
            # TODO: check if this is a bug in matplotlib.
            ax.dataLim = Bbox.union([original_bbox, bbox])
        elif viewport == viewport.zoom_to_map:
            ax.dataLim = bbox
    elif viewport == viewport.keep_unchanged:
        ax.dataLim = original_bbox
    ax.autoscale_view()
    # Initially flipped axes are lost for mysterious reasons...
    if SETTINGS.plot_invert_xaxis:
        ax.invert_xaxis()
    if SETTINGS.plot_invert_yaxis:
        ax.invert_yaxis()
| 32,864 | 39.524044 | 83 | py |
evo | evo-master/evo/tools/settings.py | """
Provides functionality for loading and resetting the package settings.
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import json
import logging
import typing
from colorama import Fore
from evo import EvoException, __version__
logger = logging.getLogger(__name__)
USER_ASSETS_PATH = os.path.join(os.path.expanduser('~'), ".evo")
USER_ASSETS_VERSION_PATH = os.path.join(USER_ASSETS_PATH, "assets_version")
DEFAULT_PATH = os.path.join(USER_ASSETS_PATH, "settings.json")
GLOBAL_LOGFILE_PATH = os.path.join(USER_ASSETS_PATH, "evo.log")
class SettingsException(EvoException):
    """Raised on invalid access to or modification of settings."""
    pass
class SettingsContainer(dict):
    """
    Dict subclass that exposes its keys as attributes (dot access) and can
    be locked against the addition of new keys after construction.
    """
    def __init__(self, data: dict, lock: bool = True):
        """
        :param data: initial settings key/value pairs
        :param lock: if True, adding new keys afterwards raises
        """
        super(SettingsContainer, self).__init__()
        # setattr routes the values through __setattr__ into the dict.
        # The lock flag is set last, so these initial writes are permitted
        # (locked() returns False while "__locked__" is absent).
        for k, v in data.items():
            setattr(self, k, v)
        setattr(self, "__locked__", lock)

    @classmethod
    def from_json_file(cls, settings_path: str) -> 'SettingsContainer':
        """Load settings from a JSON file into a new container."""
        with open(settings_path) as settings_file:
            data = json.load(settings_file)
        return SettingsContainer(data)

    def locked(self) -> bool:
        """Whether adding new parameters is currently forbidden."""
        if "__locked__" in self:
            return self["__locked__"]
        return False

    def __getattr__(self, attr):
        # allow dot access
        if attr not in self:
            raise SettingsException("unknown settings parameter: " + str(attr))
        return self[attr]

    def __setattr__(self, attr, value):
        # allow dot access
        # Existing keys can always be overwritten; new keys only if unlocked.
        if self.locked() and attr not in self:
            raise SettingsException(
                "write-access locked, can't add new parameter {}".format(attr))
        else:
            self[attr] = value

    def update_existing_keys(self, other: dict):
        """Update values of keys present in both, ignoring unknown keys."""
        self.update((key, other[key]) for key in self.keys() & other.keys())
def merge_dicts(first: dict, second: dict, soft: bool = False) -> dict:
    """
    Merge `second` into `first` in place and return `first`.
    :param first: dictionary that is updated (mutated)
    :param second: dictionary whose entries are merged in
    :param soft: if True, keep existing values of `first` and only add
        keys that are missing; otherwise `second` wins on conflicts
    :return: the mutated `first` dictionary
    """
    if soft:
        for key, value in second.items():
            first.setdefault(key, value)
    else:
        first.update(second)
    return first
def write_to_json_file(json_path: str, dictionary: dict) -> None:
    """
    Serialize `dictionary` to `json_path` as pretty-printed JSON
    (4-space indent, keys sorted alphabetically).
    """
    with open(json_path, 'w') as json_file:
        json.dump(dictionary, json_file, indent=4, sort_keys=True)
def reset(destination: str = DEFAULT_PATH,
          parameter_subset: typing.Optional[typing.Sequence] = None) -> None:
    """
    Reset a settings file to the default values.
    :param destination: path of the settings file to (re)write
    :param parameter_subset: if given and the file already exists, reset only
        these parameters instead of rewriting the whole file;
        unknown parameter names are silently ignored
    """
    from evo.tools.settings_template import DEFAULT_SETTINGS_DICT
    if not os.path.exists(destination) or parameter_subset is None:
        write_to_json_file(destination, DEFAULT_SETTINGS_DICT)
    elif parameter_subset:
        # Bug fix: the original used json.load(open(destination)) and
        # never closed the file handle.
        with open(destination) as settings_file:
            reset_settings = json.load(settings_file)
        for parameter in parameter_subset:
            if parameter not in DEFAULT_SETTINGS_DICT:
                continue
            reset_settings[parameter] = DEFAULT_SETTINGS_DICT[parameter]
        write_to_json_file(destination, reset_settings)
def initialize_if_needed() -> None:
    """
    Initialize evo user folder after first installation
    (or if it was deleted).
    """
    if not os.path.isdir(USER_ASSETS_PATH):
        os.makedirs(USER_ASSETS_PATH)
    if not os.path.exists(USER_ASSETS_VERSION_PATH):
        # Bug fix: 'with' closes the file handle, the original left
        # open(...).write(...) dangling.
        with open(USER_ASSETS_VERSION_PATH, 'w') as version_file:
            version_file.write(__version__)
    if not os.path.exists(DEFAULT_PATH):
        try:
            reset(destination=DEFAULT_PATH)
            print("{}Initialized new {}{}".format(Fore.LIGHTYELLOW_EX,
                                                  DEFAULT_PATH, Fore.RESET))
        # except BaseException is equivalent to the original bare 'except:'
        # (it re-raises after logging), but doesn't trip linters.
        except BaseException:
            logger.error(
                "Fatal: failed to write package settings file {}".format(
                    DEFAULT_PATH))
            raise
def update_if_outdated() -> None:
    """
    Update user settings to a new version if needed.
    Merges new default parameters into the existing settings file without
    overwriting the user's values, then bumps the stored assets version.
    """
    # Bug fix: all file handles are now closed via 'with' - the original
    # used open(...).read() / open(...).write() and leaked them.
    with open(USER_ASSETS_VERSION_PATH) as version_file:
        if version_file.read() == __version__:
            return
    from evo.tools.settings_template import DEFAULT_SETTINGS_DICT
    with open(DEFAULT_PATH) as settings_file:
        old_settings = json.load(settings_file)
    # soft merge: keep the user's existing values, only add new parameters.
    updated_settings = merge_dicts(old_settings, DEFAULT_SETTINGS_DICT,
                                   soft=True)
    write_to_json_file(DEFAULT_PATH, updated_settings)
    with open(USER_ASSETS_VERSION_PATH, 'w') as version_file:
        version_file.write(__version__)
    print("{}Updated outdated {}{}".format(Fore.LIGHTYELLOW_EX, DEFAULT_PATH,
                                           Fore.RESET))
# Load the user settings into this container.
# Runs at import time: creates ~/.evo and the default settings file on first
# use, migrates settings from an older evo version if needed, then exposes
# them module-wide as SETTINGS.
initialize_if_needed()
update_if_outdated()
SETTINGS = SettingsContainer.from_json_file(DEFAULT_PATH)
| 5,045 | 33.094595 | 79 | py |
evo | evo-master/evo/tools/settings_template.py | """
default package settings definition
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import pkgutil
def get_default_plot_backend() -> str:
    """
    Choose a sensible default matplotlib backend for the current system.
    :return: "Agg" on headless POSIX systems (no DISPLAY), "Qt5Agg" if
        PyQt5 is installed, "TkAgg" otherwise.
    """
    # Headless POSIX system: only a non-interactive backend can work.
    if os.name == "posix" and os.getenv("DISPLAY", default="") == "":
        return "Agg"
    # importlib.util.find_spec replaces pkgutil.find_loader, which is
    # deprecated since Python 3.12 (removed in 3.14).
    import importlib.util
    backends = {"PyQt5": "Qt5Agg"}
    for pkg, backend in backends.items():
        if importlib.util.find_spec(pkg) is not None:
            return backend
    return "TkAgg"
# default settings with documentation
# yapf: disable
DEFAULT_SETTINGS_DICT_DOC = {
"global_logfile_enabled": (
False,
("Whether to write a global logfile to the home folder.\n"
"Run 'evo pkg --logfile' to see the logfile location.")
),
"console_logging_format": (
"%(message)s",
"Format string for the logging module (affects only console output)."
),
"euler_angle_sequence": (
"sxyz",
("Only used in evo_traj's RPY plot: Euler rotation axis sequence.\n"
"E.g. 'sxyz' or 'ryxy', where s=static or r=rotating frame.\n"
"See evo/core/transformations.py for more information.")
),
"plot_axis_marker_scale": (
0.,
"Scaling parameter of pose coordinate frame markers. 0 will draw nothing."
),
"plot_backend": (
get_default_plot_backend(),
"matplotlib backend - default is 'Qt5Agg' (if PyQt is installed) or 'TkAgg'."
),
"plot_pose_correspondences": (
False,
"If enabled, lines will be plotted that connect corresponding poses"
" between the reference and synced trajectories."
),
"plot_pose_correspondences_linestyle": (
"dotted",
"Style of pose correspondence markers: "
"'solid', 'dashed', 'dashdot' or 'dotted'"
),
"plot_statistics": (
["rmse", "median", "mean", "std", "min", "max"],
("Statistics that are included in plots of evo_{ape, rpe, res}.\n"
"Can also be set to 'none'.")
),
"plot_figsize": (
[6, 6],
"The default size of one (sub)plot figure (width, height)."
),
"plot_fontfamily": (
"sans-serif",
"Font family string supported by matplotlib."
),
"plot_fontscale": (
1.0,
"Font scale value, see: https://seaborn.pydata.org/generated/seaborn.set.html"
),
"plot_invert_xaxis": (
False,
"Invert the x-axis of plots."
),
"plot_invert_yaxis": (
False,
"Invert the y-axis of plots."
),
"plot_linewidth": (
1.5,
"Line width value supported by matplotlib."
),
"plot_mode_default": (
"xyz",
"Default value for --plot_mode used in evo_{traj, ape, rpe}."
),
"plot_multi_cmap": (
"none",
"Color map for coloring plots from multiple data sources.\n"
+ "'none' will use the default color palette, see plot_seaborn_palette."
),
"plot_reference_alpha": (
0.5,
"Alpha value of the reference trajectories in plots."
),
"plot_reference_color": (
"black",
"Color of the reference trajectories in plots."
),
"plot_reference_linestyle": (
"--",
"matplotlib linestyle of reference trajectories in plots."
),
"plot_reference_axis_marker_scale": (
0.,
"Scaling parameter of pose coordinate frame markers of reference trajectories. "
+ "0 will draw nothing."
),
"plot_seaborn_palette": (
"deep6",
"Default color cycle, taken from a palette of the seaborn package.\n"
"Can also be a list of colors.\n"
"See: https://seaborn.pydata.org/generated/seaborn.color_palette.html"
),
"plot_seaborn_style": (
"darkgrid",
"Defines the plot background/grid.\n"
+ "Options: 'whitegrid', 'darkgrid', 'white' or 'dark'."
),
"plot_show_axis": (
True,
"Enables / disables the plot axis in trajectory plots."
),
"plot_show_legend": (
True,
"Enables / disables the legend in trajectory plots."
),
"plot_split": (
False,
"Show / save each figure separately instead of a collection."
),
"plot_start_end_markers": (
False,
"Mark the start and end of a trajectory with a symbol.\n"
"Start is marked with a circle, end with a cross."
),
"plot_texsystem": (
"pdflatex",
"'xelatex', 'lualatex' or 'pdflatex', see: https://matplotlib.org/users/pgf.html",
),
"plot_trajectory_alpha": (
0.75,
"Alpha value of non-reference trajectories in plots.",
),
"plot_trajectory_cmap": (
"jet",
"matplotlib color map used for mapping values on a trajectory.",
),
"plot_trajectory_linestyle": (
"-",
"matplotlib linestyle of non-reference trajectories in plots.",
),
"plot_usetex": (
False,
"Use the LaTeX renderer configured in plot_texsystem for plots.",
),
"plot_xyz_realistic": (
True,
"Equal axes ratio for realistic trajectory plots.\n"
"Turning it off allows to stretch the plot without keeping the ratio."
),
"ros_map_alpha_value": (
1.0,
"Alpha value for blending ROS map image slices."
),
"ros_map_cmap": (
"Greys_r",
"matplotlib colormap for coloring ROS map cells."
),
"ros_map_enable_masking": (
True,
"Enables/disables the masking of unknown cells from a map image,\n"
"based on the 'ros_map_unknown_cell_value'."
),
"ros_map_unknown_cell_value": (
205,
"uint8 value that represents unknown cells in a ROS map image.\n"
"Used to remove unknown cell pixels when a ROS map is added to a plot."
"\nmap_saver uses 205, other tools might not.\n"
"(for example, Cartographer uses 128 for images of probability grids)"
"\nHas no effect if ros_map_enable_masking is set to false."
),
"ros_map_viewport": (
"keep_unchanged",
"How to change the plot axis limits (viewport) when plotting a map.\n"
"One of the following options: keep_unchanged, zoom_to_map, update"
),
"save_traj_in_zip": (
False,
"Store backup trajectories in result zip files (increases size)."
),
"table_export_data": (
"stats",
"Which data to export: 'info', 'stats' or 'error_array'.",
),
"table_export_format": (
"csv",
"Format for exporting tables, e.g. 'csv', 'excel', 'latex', 'json'...",
),
"table_export_transpose": (
True,
"Transpose tables for export."
),
"tf_cache_lookup_frequency": (
10,
"Frequency for looking up transformations when loading trajectories \n"
"from a TF topic, in Hz."
),
"tf_cache_max_time": (
1e4,
"TF transform cache time in seconds."
),
}
# yapf: enable

# Plain {parameter: default value} mapping, stripped of the documentation
# strings of DEFAULT_SETTINGS_DICT_DOC above.
DEFAULT_SETTINGS_DICT = {k: v[0] for k, v in DEFAULT_SETTINGS_DICT_DOC.items()}
| 7,636 | 31.360169 | 90 | py |
evo | evo-master/evo/tools/tf_cache.py | # -*- coding: UTF8 -*-
"""
TF topic handling
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import warnings
from collections import defaultdict
from typing import DefaultDict, List, Optional
import numpy as np
import rospy
import tf2_py
from geometry_msgs.msg import TransformStamped
from rosbags.rosbag1 import Reader as Rosbag1Reader
from rosbags.serde.serdes import deserialize_cdr, ros1_to_cdr
from std_msgs.msg import Header
from evo import EvoException
from evo.core.trajectory import PoseTrajectory3D
from evo.tools import tf_id
from evo.tools.file_interface import _get_xyz_quat_from_transform_stamped
from evo.tools.settings import SETTINGS
logger = logging.getLogger(__name__)
class TfCacheException(EvoException):
    """Raised when TF caching or trajectory lookup fails."""
    pass
class TfCache(object):
    """
    For caching TF messages and looking up trajectories of specific transforms.
    """
    def __init__(self):
        # TF buffer holding all cached transforms, bounded by the configured
        # cache duration.
        self.buffer = tf2_py.BufferCore(
            rospy.Duration.from_sec(SETTINGS.tf_cache_max_time))
        # Topic names and bag file names that were already loaded,
        # used to avoid re-reading the same bag/topic pair.
        self.topics = []
        self.bags = []

    def clear(self) -> None:
        """Drop all cached transforms and the cached bag/topic bookkeeping."""
        logger.debug("Clearing TF cache.")
        self.buffer.clear()
        self.topics = []
        self.bags = []

    # TODO: support also ROS2 bag reader.
    def from_bag(self, reader: Rosbag1Reader, topic: str = "/tf",
                 static_topic: str = "/tf_static") -> None:
        """
        Loads the TF topics from a bagfile into the buffer,
        if it's not already cached.
        :param reader: opened bag reader (rosbags.rosbag1)
        :param topic: TF topic
        :param static_topic: topic with static transforms, added implicitly
            to the buffer if it exists in the bag
        :raises: TfCacheException if `topic` has no messages in the bag
        """
        tf_topics = [topic]
        if topic not in reader.topics:
            raise TfCacheException(
                "no messages for topic {} in bag".format(topic))
        # Implicitly add static TFs to buffer if present.
        if static_topic in reader.topics:
            tf_topics.append(static_topic)
        # Add TF data to buffer if this bag/topic pair is not already cached.
        for tf_topic in tf_topics:
            if tf_topic in self.topics and reader.path.name in self.bags:
                logger.debug("Using cache for topic {} from {}".format(
                    tf_topic, reader.path.name))
                continue
            logger.debug("Caching TF topic {} from {} ...".format(
                tf_topic, reader.path.name))
            connections = [
                c for c in reader.connections if c.topic == tf_topic
            ]
            for connection, _, rawdata in reader.messages(
                    connections=connections):
                msg = deserialize_cdr(ros1_to_cdr(rawdata, connection.msgtype),
                                      connection.msgtype)
                for tf in msg.transforms:
                    # Convert from rosbags.typesys.types to native ROS.
                    # Related: https://gitlab.com/ternaris/rosbags/-/issues/13
                    stamp = rospy.Time()
                    stamp.secs = tf.header.stamp.sec
                    stamp.nsecs = tf.header.stamp.nanosec
                    tf = TransformStamped(Header(0, stamp, tf.header.frame_id),
                                          tf.child_frame_id, tf.transform)
                    if tf_topic == static_topic:
                        self.buffer.set_transform_static(tf, __name__)
                    else:
                        self.buffer.set_transform(tf, __name__)
            self.topics.append(tf_topic)
            self.bags.append(reader.path.name)

    def lookup_trajectory(self, parent_frame: str, child_frame: str,
                          timestamps: List[rospy.Time]) -> PoseTrajectory3D:
        """
        Look up the trajectory of a transform chain from the cache's TF buffer.
        Timestamps for which no transform can be interpolated are skipped.
        :param parent_frame, child_frame: TF transform frame IDs
        :param timestamps: timestamps at which to lookup the trajectory poses
            (sorted in place)
        :return: PoseTrajectory3D of the transform chain
        """
        stamps, xyz, quat = [], [], []
        # Sort so the resulting trajectory is in ascending time order.
        timestamps.sort()
        for timestamp in timestamps:
            try:
                tf = self.buffer.lookup_transform_core(parent_frame,
                                                       child_frame, timestamp)
            except tf2_py.ExtrapolationException:
                # Timestamp outside of the buffered range - skip it.
                continue
            stamps.append(tf.header.stamp.to_sec())
            x, q = _get_xyz_quat_from_transform_stamped(tf)
            xyz.append(x)
            quat.append(q)
        trajectory = PoseTrajectory3D(
            np.array(xyz), np.array(quat), np.array(stamps), meta={
                "frame_id": parent_frame,
                "child_frame_id": child_frame
            })
        return trajectory

    def get_trajectory(
            self, reader: Rosbag1Reader, identifier: str,
            timestamps: Optional[List[rospy.Time]] = None) -> PoseTrajectory3D:
        """
        Get a TF trajectory from a bag file. Updates or uses the cache.
        :param reader: opened bag reader (rosbags.rosbag1)
        :param identifier: trajectory ID <topic>:<parent_frame>.<child_frame>
                           Example: /tf:map.base_link
        :param timestamps: optional explicit lookup timestamps; if None,
            they are generated at the configured tf_cache_lookup_frequency
        :raises: TfCacheException if the frames can't be connected
        """
        split_id = tf_id.split_id(identifier)
        topic, parent, child = split_id[0], split_id[1], split_id[2]
        static_topic = split_id[3] if len(split_id) == 4 else "/tf_static"
        logger.debug(f"Loading trajectory of transform '{parent} to {child}' "
                     f"from topic {topic} (static topic: {static_topic}).")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            self.from_bag(reader, topic, static_topic)
        try:
            latest_time = self.buffer.get_latest_common_time(parent, child)
        except (tf2_py.LookupException, tf2_py.TransformException) as e:
            raise TfCacheException("Could not load trajectory: " + str(e))
        # rosbags Reader start_time is in nanoseconds.
        start_time = rospy.Time.from_sec(reader.start_time * 1e-9)
        if timestamps is None:
            timestamps = []
            # Static TF have zero timestamp in the buffer, which will be lower
            # than the bag start time. Looking up a static TF is a valid
            # request, so this should be possible.
            if latest_time < start_time:
                timestamps.append(latest_time)
            else:
                # Sample lookup times at the configured frequency.
                step = rospy.Duration.from_sec(
                    1. / SETTINGS.tf_cache_lookup_frequency)
                time = start_time
                while time <= latest_time:
                    timestamps.append(time)
                    time = time + step
        return self.lookup_trajectory(parent, child, timestamps)
# One lazily-created TfCache per integer key.
# defaultdict(TfCache) is equivalent to defaultdict(lambda: TfCache()).
__instance: DefaultDict[int, TfCache] = defaultdict(TfCache)


def instance(hash: int) -> TfCache:
    """
    Hacky module-level "singleton" of TfCache.
    :param hash: key identifying the cache instance
    :return: the TfCache for this key, created on first access
    """
    # No 'global' statement needed: the module-level dict is only read
    # (and mutated), never rebound.
    return __instance[hash]
| 7,737 | 40.159574 | 81 | py |
evo | evo-master/evo/tools/tf_id.py | """
TF topic ID string handling
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from evo import EvoException
# Matches runs of characters allowed in a ROS name component:
# slashes, underscores, digits and ASCII letters.
# NOTE(review): the '|' characters inside the character class are literal and
# therefore also matched - probably unintended; verify before tightening.
ROS_NAME_REGEX = re.compile(r"([\/|_|0-9|a-z|A-Z]+)")
class TfIdException(EvoException):
    """Raised when a TF trajectory identifier string is malformed."""
    pass
def split_id(identifier: str) -> tuple:
    """
    Split a TF trajectory identifier string into its name components.
    :param identifier: e.g. "/tf:map.base_footprint"
    :return: tuple (topic, parent_frame, child_frame) or, with a fourth
        component, (topic, parent_frame, child_frame, static_topic)
    :raises: TfIdException if the string doesn't yield 3 or 4 components
    """
    components = ROS_NAME_REGEX.findall(identifier)
    # If a fourth component exists, it's interpreted as the static TF name.
    if len(components) not in (3, 4):
        raise TfIdException(
            "ID string malformed, it should look similar to this: "
            "/tf:map.base_footprint")
    return tuple(components)
def check_id(identifier: str) -> bool:
    """Return True if `identifier` is a valid TF trajectory ID string."""
    try:
        split_id(identifier)
        return True
    except TfIdException:
        return False
| 1,342 | 26.979167 | 75 | py |
evo | evo-master/evo/tools/user.py | """
user interaction functions
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import logging
logger = logging.getLogger(__name__)
def prompt_val(msg: str = "enter a value:") -> str:
    """Prompt the user on stdin with `msg` and return the entered value."""
    return input("{}\n".format(msg))
def confirm(msg: str = "enter 'y' to confirm or any other key to cancel",
            key: str = 'y') -> bool:
    """
    Ask the user for confirmation via stdin.
    :param msg: the prompt to display
    :param key: the exact input that counts as confirmation
    :return: True iff the user entered `key`
    """
    return input(msg + "\n") == key
def check_and_confirm_overwrite(file_path: str) -> bool:
    """
    If `file_path` exists, warn and ask the user whether to overwrite it.
    :return: True if the file doesn't exist or the user confirmed overwriting
    """
    if not os.path.isfile(file_path):
        return True
    logger.warning(file_path + " exists, overwrite?")
    return confirm("enter 'y' to overwrite or any other key to cancel")
| 1,301 | 27.933333 | 75 | py |
evo | evo-master/test/ape_rpe_smoke_test.py | #!/usr/bin/env python
import os
import shutil
import subprocess as sp
tmp_dir = "tmp"
cfg_dir = "cfg/ape_rpe"
here = os.path.dirname(os.path.abspath(__file__))
# always run in script location
os.chdir(here)
metrics = ["evo_ape", "evo_rpe"]
data = [
"euroc data/V102_groundtruth.csv data/V102.txt",
"kitti data/KITTI_00_gt.txt data/KITTI_00_ORB.txt",
"tum data/fr2_desk_groundtruth.txt data/fr2_desk_ORB.txt",
"bag data/ROS_example.bag groundtruth S-PTAM"
]
try:
for m in metrics:
for d in data:
for cfg in os.listdir(cfg_dir):
os.mkdir(tmp_dir)
cfg = os.path.join(cfg_dir, cfg)
cmd = "{} {} -c {}".format(m, d, cfg)
print("[smoke test] {}".format(cmd))
output = sp.check_output(cmd.split(" "), cwd=here)
shutil.rmtree(tmp_dir)
except sp.CalledProcessError as e:
print(e.output.decode("utf-8"))
raise
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
| 1,022 | 25.230769 | 66 | py |
evo | evo-master/test/helpers.py | """
Common helper functions and classes for tests.
Author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from evo.core import lie_algebra as lie
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
def random_se3_list(length):
    """
    Generate random SE(3) transformations.
    :param length: number of poses to generate
    :return: list of `length` random SE(3) poses
    """
    poses = []
    for _ in range(length):
        poses.append(lie.random_se3())
    return poses
def fake_timestamps(length, distance, start_time=0.):
    """
    Generate equidistant timestamps.
    :param length: number of timestamps
    :param distance: time delta between consecutive timestamps
    :param start_time: value of the first timestamp
    :return: numpy array of `length` equidistant timestamps
    """
    return start_time + distance * np.arange(length)
def fake_path(length):
    """Return a PosePath3D built from `length` random poses."""
    random_poses = random_se3_list(length)
    return PosePath3D(poses_se3=random_poses)
def fake_trajectory(length, timestamp_distance, start_time=0.):
    """Return a PoseTrajectory3D with random poses and synthetic timestamps."""
    stamps = fake_timestamps(length, timestamp_distance, start_time)
    poses = random_se3_list(length)
    return PoseTrajectory3D(poses_se3=poses, timestamps=stamps)
| 1,357 | 30.581395 | 73 | py |
evo | evo-master/test/res_smoke_test.py | #!/usr/bin/env python
import os
import shutil
import subprocess as sp
# Smoke test: run evo_res on pairs of pre-generated result .zip files with
# every config in cfg/res. Any non-zero exit code fails the test.
tmp_dir = "tmp"  # scratch dir, (re-)created per command
cfg_dir = "cfg/res"
here = os.path.dirname(os.path.abspath(__file__))
# always run in script location
os.chdir(here)
# Each entry is a pair of result files passed to evo_res for comparison.
data = [
    "data/res_files/orb_rpe-for-each.zip data/res_files/sptam_rpe-for-each.zip",
    "data/res_files/orb_rpe.zip data/res_files/sptam_rpe.zip",
    "data/res_files/orb_ape.zip data/res_files/sptam_ape.zip",
]
try:
    for d in data:
        for cfg in os.listdir(cfg_dir):
            # fresh tmp dir per run; configs may write output files here
            os.mkdir(tmp_dir)
            cfg = os.path.join(cfg_dir, cfg)
            cmd = "evo_res {} -c {}".format(d, cfg)
            print("[smoke test] {}".format(cmd))
            # check_output raises CalledProcessError on non-zero exit
            output = sp.check_output(cmd.split(" "), cwd=here)
            shutil.rmtree(tmp_dir)
except sp.CalledProcessError as e:
    # surface the failed command's output before re-raising
    print(e.output.decode("utf-8"))
    raise
finally:
    # clean up a tmp dir left over by a failed run
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
| 917 | 25.228571 | 80 | py |
evo | evo-master/test/run_all_demos.sh | #!/usr/bin/env bash
set -e # exit on error
# Optionally forward --no_plots to every demo script.
n=""
if [[ $* == *--no_plots* ]]; then
    n="--no_plots"
fi
# run all demo scripts to get cheap app tests
# `yes |` auto-confirms the interactive "press enter" prompts in the demos.
yes | demos/traj_demo.sh "$n"
yes | demos/ape_demo.sh "$n"
yes | demos/rpe_demo.sh "$n"
yes | demos/res_demo.sh "$n"
yes | demos/latex_demo.sh "$n"
# Offer to remove the files generated by the demos.
echo "enter 'y' to clean, any other key to exit"
read input
if [[ $input == y ]]; then
    demos/clean.sh
    exit 0
fi | 422 | 18.227273 | 48 | sh |
evo | evo-master/test/test_file_interface.py | #!/usr/bin/env python
"""
Unit test for file_interface module.
Author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import io
import tempfile
import unittest
import numpy as np
from rosbags.rosbag1 import (Reader as Rosbag1Reader, Writer as Rosbag1Writer)
from rosbags.rosbag2 import (Reader as Rosbag2Reader, Writer as Rosbag2Writer)
import helpers
from evo.core.result import Result
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
from evo.tools import file_interface
class MockFileTestCase(unittest.TestCase):
    """Base test case that provides an in-memory file object as 'mock_file'.

    Subclasses pass a StringIO (text formats) or BytesIO (binary formats)
    buffer that is shared by all test methods of the case.
    """

    def __init__(self, in_memory_buffer, *args, **kwargs):
        super(MockFileTestCase, self).__init__(*args, **kwargs)
        # io.StringIO or io.BytesIO, used instead of a real file on disk.
        self.mock_file = in_memory_buffer

    @staticmethod
    def run_and_clear(test_method):
        """Decorator: empty the shared mock file again after the test."""
        def _decorator(self, *args, **kwargs):
            try:
                test_method(self, *args, **kwargs)
            finally:
                # rewind and truncate so the next test starts with an
                # empty buffer even if this test failed
                self.mock_file.seek(0)
                self.mock_file.truncate()
        return _decorator

    @staticmethod
    def allow_import_error(test_method):
        """Decorator: treat an ImportError as a skip (optional dependency)."""
        def _decorator(self, *args, **kwargs):
            try:
                test_method(self, *args, **kwargs)
            except ImportError:
                pass
        return _decorator
class TestTumFile(MockFileTestCase):
    """Tests for reading/writing TUM trajectory files.

    TUM format: 8 space-separated columns per line
    (timestamp x y z qx qy qz qw).
    """

    def __init__(self, *args, **kwargs):
        # TUM files are text, hence a StringIO mock file.
        super(TestTumFile, self).__init__(io.StringIO(), *args, **kwargs)

    @MockFileTestCase.run_and_clear
    def test_write_read_integrity(self):
        # A written trajectory must be read back identically.
        traj_out = helpers.fake_trajectory(1000, 0.1)
        self.assertTrue(traj_out.check())
        file_interface.write_tum_trajectory_file(self.mock_file, traj_out)
        self.mock_file.seek(0)
        traj_in = file_interface.read_tum_trajectory_file(self.mock_file)
        self.assertIsInstance(traj_in, PoseTrajectory3D)
        self.assertTrue(traj_in.check())
        self.assertTrue(traj_out == traj_in)

    @MockFileTestCase.run_and_clear
    def test_trailing_delim(self):
        # 8 valid columns but a trailing delimiter must be rejected.
        self.mock_file.write(u"0 0 0 0 0 0 0 1 ")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_tum_trajectory_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_many_columns(self):
        # 9 columns instead of the expected 8.
        self.mock_file.write(u"1 2 3 4 5 6 7 8 9")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_tum_trajectory_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_few_columns(self):
        # 7 columns instead of the expected 8.
        self.mock_file.write(u"1 2 3 4 5 6 7")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_tum_trajectory_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_few_columns_with_trailing_delim(self):
        # 7 columns plus trailing delimiter must not pass as 8 columns.
        self.mock_file.write(u"1 2 3 4 5 6 7 ")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_tum_trajectory_file(self.mock_file)
class TestKittiFile(MockFileTestCase):
    """Tests for reading/writing KITTI pose files.

    KITTI format: 12 space-separated columns per line
    (the first 3 rows of a 4x4 SE(3) matrix, row-major, no timestamps).
    """

    def __init__(self, *args, **kwargs):
        # KITTI files are text, hence a StringIO mock file.
        super(TestKittiFile, self).__init__(io.StringIO(), *args, **kwargs)

    @MockFileTestCase.run_and_clear
    def test_write_read_integrity(self):
        # A written path must be read back identically.
        traj_out = helpers.fake_path(1000)
        self.assertTrue(traj_out.check())
        file_interface.write_kitti_poses_file(self.mock_file, traj_out)
        self.mock_file.seek(0)
        traj_in = file_interface.read_kitti_poses_file(self.mock_file)
        self.assertIsInstance(traj_in, PosePath3D)
        self.assertTrue(traj_in.check())
        self.assertTrue(traj_out == traj_in)

    @MockFileTestCase.run_and_clear
    def test_trailing_delim(self):
        # 12 valid columns but a trailing delimiter must be rejected.
        self.mock_file.write(u"1 0 0 0.1 0 1 0 0.2 0 0 1 0.3 ")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_kitti_poses_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_many_columns(self):
        # 13 columns instead of the expected 12.
        self.mock_file.write(u"1 2 3 4 5 6 7 8 9 10 11 12 13")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_kitti_poses_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_few_columns(self):
        # 11 columns instead of the expected 12.
        self.mock_file.write(u"1 2 3 4 5 6 7 8 9 10 11")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_kitti_poses_file(self.mock_file)

    @MockFileTestCase.run_and_clear
    def test_too_few_columns_with_trailing_delim(self):
        # 11 columns plus trailing delimiter must not pass as 12 columns.
        self.mock_file.write(u"1 2 3 4 5 6 7 8 9 10 11 ")
        self.mock_file.seek(0)
        with self.assertRaises(file_interface.FileInterfaceException):
            file_interface.read_kitti_poses_file(self.mock_file)
class TestBagFile(MockFileTestCase):
    """Round-trip tests for trajectories in ROS1 and ROS2 bag files."""

    def __init__(self, *args, **kwargs):
        # note: the in-memory buffer is unused here, real temp files are
        # needed because the rosbags readers/writers work on file paths
        super(TestBagFile, self).__init__(io.BytesIO(), *args, **kwargs)

    def test_write_read_integrity(self):
        # Exercise both bag format versions with the same round trip.
        for reader_t, writer_t in zip([Rosbag1Reader, Rosbag2Reader],
                                      [Rosbag1Writer, Rosbag2Writer]):
            # TODO: rosbags cannot overwrite existing paths, this forces us
            # to do this here to get only a filepath:
            tmp_filename = tempfile.NamedTemporaryFile(delete=True).name
            bag_out = writer_t(tmp_filename)
            bag_out.open()
            traj_out = helpers.fake_trajectory(1000, 0.1)
            self.assertTrue(traj_out.check())
            file_interface.write_bag_trajectory(bag_out, traj_out, "/test",
                                                frame_id="map")
            bag_out.close()
            # Read the trajectory back from the bag and compare.
            bag_in = reader_t(tmp_filename)
            bag_in.open()
            traj_in = file_interface.read_bag_trajectory(bag_in, "/test")
            self.assertIsInstance(traj_in, PoseTrajectory3D)
            self.assertTrue(traj_in.check())
            self.assertTrue(traj_out == traj_in)
            # frame_id must survive the round trip via the metadata dict
            self.assertEqual(traj_in.meta["frame_id"], "map")
class TestResultFile(MockFileTestCase):
    """Round-trip test for the zipped evo result file format."""

    def __init__(self, *args, **kwargs):
        # Result files are binary (zip archives), hence a BytesIO mock file.
        super(TestResultFile, self).__init__(io.BytesIO(), *args, **kwargs)

    @MockFileTestCase.run_and_clear
    def test_write_read_integrity(self):
        # Build a result containing all supported payload types.
        expected = Result()
        expected.add_np_array("test-array", np.ones(1000))
        expected.add_info({"name": "test", "number": 666})
        expected.add_trajectory("traj", helpers.fake_trajectory(1000, 0.1))
        # Save to the in-memory file and load it back - including the
        # trajectories, which are optional on load.
        file_interface.save_res_file(self.mock_file, expected)
        loaded = file_interface.load_res_file(self.mock_file,
                                              load_trajectories=True)
        self.assertEqual(loaded, expected)
class TestHasUtf8Bom(unittest.TestCase):
    """Tests for file_interface.has_utf8_bom().

    Uses NamedTemporaryFile as a context manager so the temporary file is
    removed again when the test ends. The previous delete=False handles
    were never cleaned up and leaked one file per test per run.
    """

    def test_no_bom(self):
        with tempfile.NamedTemporaryFile() as tmp_file:
            # re-open by name to write plain content without a BOM
            with open(tmp_file.name, 'w') as f:
                f.write("foo")
            self.assertFalse(file_interface.has_utf8_bom(tmp_file.name))

    def test_with_bom(self):
        with tempfile.NamedTemporaryFile() as tmp_file:
            # write the raw UTF-8 byte order mark
            with open(tmp_file.name, 'wb') as f:
                f.write(b"\xef\xbb\xbf")
            self.assertTrue(file_interface.has_utf8_bom(tmp_file.name))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 8,093 | 37.727273 | 78 | py |
evo | evo-master/test/test_filters.py | #!/usr/bin/env python
"""
unit test for filters module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import unittest
import numpy as np
from evo.core import filters
from evo.core import lie_algebra as lie
# TODO: clean these up and use proper fixtures.
# All fixtures below use identity rotations and move only along the z axis,
# so the accumulated path length is easy to reason about in the tests.
# POSES_1: path distances between consecutive poses: 0.5, 0.5, 1.0
POSES_1 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]
# POSES_2: monotonic z motion, total displacement from first to last is 1.0
POSES_2 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 1.0]))
]
# POSES_3: z converges towards 1.0 but never reaches it exactly
POSES_3 = [
    lie.se3(np.eye(3), np.array([0, 0, 0.0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]
# POSES_4: one unit step, then stationary
POSES_4 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]
class TestFilterPairsByPath(unittest.TestCase):
    """Tests for filters.filter_pairs_by_path() with the fixtures above."""

    def test_poses1_all_pairs(self):
        # path distance of exactly 1.0 exists between (0,2) and (2,3)
        target_path = 1.0
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 2), (2, 3)])

    def test_poses1_wrong_target(self):
        # no pair of POSES_1 has a path distance of 2.5 -> empty result
        target_path = 2.5
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [])

    def test_poses2_all_pairs_low_tolerance(self):
        # only (0, 3) matches 1.0 within the small tolerance
        target_path = 1.0
        tol = 0.001
        id_pairs = filters.filter_pairs_by_path(POSES_2, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 3)])

    def test_convergence_all_pairs(self):
        # POSES_3 converges to z=1.0; with tol=0.2 only (0, 7) qualifies
        target_path = 1.0
        tol = 0.2
        id_pairs = filters.filter_pairs_by_path(POSES_3, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 7)])
# Rotation-only fixtures: all poses share the same position, only the
# rotation angle about the x axis changes.
axis = np.array([1, 0, 0])
POSES_5 = [
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0]))
]
# The angle-based filters must be invariant to a global transformation,
# so each fixture also exists in a randomly transformed variant.
TRANSFORM = lie.random_se3()
POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5]
# POSES_6: chain of poses, each rotated pi/3 further relative to the
# previous one (plus a translation offset); last pose is duplicated.
axis = np.array([1, 0, 0])
p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0]))
pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3]))
p1 = np.dot(p0, pd)
p2 = np.dot(p1, pd)
p3 = np.dot(p2, pd)
POSES_6 = [p0, p1, p2, p3, p3]
POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6]
class TestFilterPairsByAngle(unittest.TestCase):
    """Tests for filters.filter_pairs_by_angle().

    Each test runs on both the plain and the globally transformed fixture,
    since relative rotation angles must be invariant to such a transform.
    """

    def test_poses5(self):
        tol = 0.001
        expected_result = [(0, 1), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi - tol
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)

            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)

    def test_poses5_all_pairs(self):
        tol = 0.01
        expected_result = [(0, 1), (0, 4), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)

            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)

    def test_poses6(self):
        tol = 0.001
        target_angle = math.pi - tol
        # consecutive mode: accumulated angle pi is reached at pose 3
        expected_result = [(0, 3)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)

    def test_poses6_all_pairs(self):
        target_angle = math.pi
        tol = 0.001
        # all-pairs mode also finds (0, 4) via the duplicated last pose
        expected_result = [(0, 3), (0, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 6,476 | 36.877193 | 78 | py |
evo | evo-master/test/test_lie_algebra.py | #!/usr/bin/env python
"""
unit test for lie_algebra module - mainly test mathematical correctness
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import timeit
import unittest
import numpy as np
from evo.core import lie_algebra as lie
class TestSE3(unittest.TestCase):
    """Tests for the SE(3) helpers in evo.core.lie_algebra."""

    def test_is_se3(self):
        # valid: orthonormal rotation block, bottom row [0 0 0 1]
        # yapf: disable
        p = np.array([[1, 0, 0, 1],
                      [0, 0, -1, 2],
                      [0, 1, 0, 3],
                      [0, 0, 0, 1]])
        self.assertTrue(lie.is_se3(p))
        # invalid: rotation block is not orthonormal
        p_false = np.array([[1, 0, 0, 1],
                            [0, 0, -4223, 2],
                            [0, 111, 0, 3],
                            [0, 0, 0, 1]])
        self.assertFalse(lie.is_se3(p_false))
        # yapf: enable

    def test_random_se3(self):
        self.assertTrue(lie.is_se3(lie.random_se3()))

    def test_se3_inverse(self):
        p = lie.random_se3()
        p_inv = lie.se3_inverse(p)
        self.assertTrue(lie.is_se3(p_inv))
        # p^-1 * p must be the identity
        self.assertTrue(np.allclose(p_inv.dot(p), np.eye(4)))

    def test_relative_se3(self):
        a = lie.random_se3()
        b = lie.random_se3()
        self.assertTrue(lie.is_se3(a) and lie.is_se3(b))
        a_to_b = lie.relative_se3(a, b)
        self.assertTrue(lie.is_se3(a_to_b))
        # applying the relative transform to a must yield b
        b_from_a = a.dot(a_to_b)
        self.assertTrue(np.allclose(b_from_a, b))
class TestSO3(unittest.TestCase):
    """Tests for the SO(3) helpers in evo.core.lie_algebra."""

    def test_is_so3(self):
        # valid orthonormal rotation matrix
        # yapf: disable
        r = np.array([[1, 0, 0],
                      [0, 0, -1],
                      [0, 1, 0]])
        self.assertTrue(lie.is_so3(r))
        # yapf: enable

    def test_random_so3(self):
        r = lie.random_so3()
        self.assertTrue(lie.is_so3(r))

    def test_relative_so3(self):
        a = lie.random_so3()
        b = lie.random_so3()
        self.assertTrue(lie.is_so3(a) and lie.is_so3(b))
        a_to_b = lie.relative_so3(a, b)
        # applying the relative rotation to a must yield b
        b_from_a = a.dot(a_to_b)
        self.assertTrue(np.allclose(b_from_a, b))

    def test_so3_from_se3(self):
        p = lie.random_se3()
        r = lie.so3_from_se3(p)
        self.assertTrue(lie.is_so3(r))

    def test_so3_log_exp(self):
        # exp(log(r)) must reproduce r (rotation vector representation)
        r = lie.random_so3()
        self.assertTrue(lie.is_so3(r))
        rotvec = lie.so3_log(r)
        self.assertTrue(np.allclose(r, lie.so3_exp(rotvec), atol=1e-6))
        # the norm of the rotation vector is the rotation angle
        angle = lie.so3_log_angle(r)
        self.assertAlmostEqual(np.linalg.norm(rotvec), angle)

    def test_so3_log_exp_skew(self):
        r = lie.random_so3()
        log = lie.so3_log(r, return_skew=True) # skew-symmetric tangent space
        # here, axis is a rotation vector with norm = angle
        rotvec = lie.vee(log)
        self.assertTrue(np.allclose(r, lie.so3_exp(rotvec)))
class TestSim3(unittest.TestCase):
    """Tests for the Sim(3) (similarity transform) helpers."""

    def test_is_sim3(self):
        r = lie.random_so3()
        t = np.array([1, 2, 3])
        s = 3
        p = lie.sim3(r, t, s)
        self.assertTrue(lie.is_sim3(p, s))

    def test_sim3_scale_effect(self):
        # a Sim(3) transform with zero translation must scale a point by s
        r = lie.random_so3()
        t = np.array([0, 0, 0])
        s = random.random() * 10
        x = np.array([1, 0, 0, 1]).T  # homogeneous vector
        p = lie.sim3(r, t, s)
        self.assertTrue(lie.is_sim3(p, s))
        x = p.dot(x)  # apply Sim(3) transformation
        # must equal rotating the already scaled vector [s, 0, 0]
        self.assertTrue(
            np.equal(x,
                     lie.se3(r).dot(np.array([s, 0, 0, 1]))).all())

    def test_sim3_inverse(self):
        r = lie.random_so3()
        t = np.array([1, 2, 3])
        s = random.random() * 10
        p = lie.sim3(r, t, s)
        self.assertTrue(lie.is_sim3(p, s))
        # p^-1 * p must be the identity
        p_inv = lie.sim3_inverse(p)
        self.assertTrue(np.allclose(p_inv.dot(p), np.eye(4)))
if __name__ == '__main__':
    # First compare the speed of several lie_algebra operations against
    # alternative implementations, then run the unit tests.
    """
    benchmarks
    """
    # SE(3) inverse: hand-written closed form vs. generic matrix inversion.
    print("\ncheck speed of SE(3) inverse:")
    setup = "from evo.core import lie_algebra as lie; " \
            "import numpy as np; se3 = lie.random_se3()"
    print("time for 1000*lie.se3_inverse(se3): ",
          timeit.timeit("lie.se3_inverse(se3)", setup=setup, number=1000))
    print("time for 1000*np.linalg.inv(se3): ",
          timeit.timeit("np.linalg.inv(se3)", setup=setup, number=1000))
    # SO(3) logarithm: skew vs. rotation vector output, and the
    # transformations module implementation for comparison.
    print("\ncheck speed of SO(3) log:")
    setup = "from evo.core import lie_algebra as lie; " \
            "import numpy as np; so3 = lie.random_so3()"
    print("time for 1000*lie.so3_log(so3, skew=True): ",
          timeit.timeit("lie.so3_log(so3, True)", setup=setup, number=1000))
    print("time for 1000*lie.so3_log(so3): ",
          timeit.timeit("lie.so3_log(so3)", setup=setup, number=1000))
    setup = "from evo.core import lie_algebra as lie; import numpy as np; " \
            "import evo.core.transformations as tr; " \
            "so3 = lie.se3(lie.random_so3(), [0, 0, 0])"
    print("time for 1000*tr.rotation_from_matrix(so3): ",
          timeit.timeit("tr.rotation_from_matrix(so3)", setup=setup,
                        number=1000))
    # SO(3) exponential from a rotation vector.
    setup = "from evo.core import lie_algebra as lie; " \
            "import numpy as np; so3 = lie.random_so3(); " \
            "rotvec = lie.so3_log(so3, False)"
    print("time for 1000*lie.so3_exp(rotvec): ",
          timeit.timeit("lie.so3_exp(rotvec)", setup=setup, number=1000))
    """
    unit test
    """
    unittest.main(verbosity=2)
| 5,877 | 33.374269 | 78 | py |
evo | evo-master/test/test_result.py | #!/usr/bin/env python
"""
unit test for lie_algebra module - mainly test mathematical correctness
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import numpy as np
from evo.core import result
class TestMergeResult(unittest.TestCase):
    """Tests for result.merge_results().

    Observed behavior: matching np_arrays of equal length are averaged,
    arrays of different length are concatenated, and stats are always
    averaged. Mismatching keys raise a ResultException.
    """

    def test_merge_strategy_average(self):
        # equal-length arrays -> element-wise average, stats -> average
        r1 = result.Result()
        r1.add_np_array("test", np.array([1., 2., 3.]))
        r1.add_stats({"bla": 1., "blub": 2.})
        r2 = result.Result()
        r2.add_np_array("test", np.array([0., 0., 0.]))
        r2.add_stats({"bla": 0., "blub": 0.})
        merged = result.merge_results([r1, r2])
        self.assertTrue(
            np.array_equal(merged.np_arrays["test"], np.array([0.5, 1., 1.5])))
        self.assertEqual(merged.stats, {"bla": 0.5, "blub": 1.})

    def test_merge_strategy_append(self):
        # different-length arrays -> concatenation; stats still averaged
        r1 = result.Result()
        r1.add_np_array("test", np.array([1., 2., 3.]))
        r1.add_stats({"bla": 1., "blub": 2.})
        r2 = result.Result()
        r2.add_np_array("test", np.array([0.]))
        r2.add_stats({"bla": 0., "blub": 0.})
        merged = result.merge_results([r1, r2])
        #yapf: disable
        self.assertTrue(
            np.array_equal(merged.np_arrays["test"],
                           np.array([1., 2., 3., 0.])))
        # yapf: enable
        self.assertEqual(merged.stats, {"bla": 0.5, "blub": 1.})

    def test_non_matching_np_arrays_keys(self):
        # results with different np_array keys cannot be merged
        r1 = result.Result()
        r1.add_np_array("test", np.array([]))
        r1.add_np_array("test_2", np.array([]))
        r2 = result.Result()
        r2.add_np_array("test", np.array([]))
        with self.assertRaises(result.ResultException):
            result.merge_results([r1, r2])

    def test_non_matching_stats_keys(self):
        # results with different stats keys cannot be merged
        r1 = result.Result()
        r1.add_stats({"bla": 1., "blub": 2.})
        r2 = result.Result()
        r2.add_stats({"foo": 1., "bar": 2.})
        with self.assertRaises(result.ResultException):
            result.merge_results([r1, r2])
| 2,629 | 35.027397 | 79 | py |
evo | evo-master/test/test_sync.py | #!/usr/bin/env python
"""
Unit test for evo.core.sync module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import helpers
from evo.core import sync
class TestMatchingTimeIndices(unittest.TestCase):
    """Tests for sync.matching_time_indices()."""

    def test_correct_positive_offset(self):
        # stamps_2 starts 0.5s later; offset_2=-0.5 compensates for that
        stamps_1 = helpers.fake_timestamps(10, 0.1, start_time=0.)
        stamps_2 = helpers.fake_timestamps(10, 0.1, start_time=0.5)
        matches = sync.matching_time_indices(stamps_1, stamps_2, offset_2=-0.5)
        self.assertEqual(len(matches[0]), 10)
        self.assertEqual(len(matches[1]), 10)

    def test_correct_negative_offset(self):
        # stamps_2 starts 0.5s earlier; offset_2=0.5 compensates for that
        stamps_1 = helpers.fake_timestamps(10, 0.1, start_time=0.)
        stamps_2 = helpers.fake_timestamps(10, 0.1, start_time=-0.5)
        matches = sync.matching_time_indices(stamps_1, stamps_2, offset_2=0.5)
        self.assertEqual(len(matches[0]), 10)
        self.assertEqual(len(matches[1]), 10)

    def test_no_matches_due_to_offset(self):
        # 2s gap with default max_diff -> no matching indices at all
        stamps_1 = helpers.fake_timestamps(10, 0.1, start_time=0.)
        stamps_2 = helpers.fake_timestamps(10, 0.1, start_time=2.)
        matches = sync.matching_time_indices(stamps_1, stamps_2)
        self.assertEqual(len(matches[0]), 0)
        self.assertEqual(len(matches[1]), 0)

    def test_max_diff(self):
        # denser stamps_2 still yields exactly one match per stamp of
        # stamps_1, for both the default and a tighter max_diff
        stamps_1 = helpers.fake_timestamps(10, 0.1, start_time=0.01)
        stamps_2 = helpers.fake_timestamps(500, 2e-3)
        # default max_diff: 0.01
        matches = sync.matching_time_indices(stamps_1, stamps_2)
        self.assertEqual(len(matches[0]), 10)
        self.assertEqual(len(matches[1]), 10)
        matches = sync.matching_time_indices(stamps_2, stamps_1, max_diff=1e-3)
        self.assertEqual(len(matches[0]), 10)
        self.assertEqual(len(matches[1]), 10)
class TestAssociateTrajectories(unittest.TestCase):
    """Tests for sync.associate_trajectories()."""

    def test_wrong_type(self):
        # PosePath3D has no timestamps -> association must be rejected
        path_1 = helpers.fake_path(10)
        path_2 = helpers.fake_path(10)
        with self.assertRaises(sync.SyncException):
            sync.associate_trajectories(path_1, path_2)

    def test_no_matches_due_to_offset(self):
        # disjoint time ranges -> no associations possible
        traj_1 = helpers.fake_trajectory(10, 0.1, start_time=0.)
        traj_2 = helpers.fake_trajectory(10, 0.1, start_time=2.)
        with self.assertRaises(sync.SyncException):
            sync.associate_trajectories(traj_1, traj_2)

    def test_association(self):
        # the denser trajectory is reduced to the sparser one's 10 poses
        traj_1 = helpers.fake_trajectory(10, 0.1)
        traj_2 = helpers.fake_trajectory(100, 0.01)
        traj_1_sync, traj_2_sync = sync.associate_trajectories(traj_1, traj_2)
        self.assertEqual(traj_1_sync.num_poses, traj_2_sync.num_poses)
        self.assertNotEqual(traj_2.num_poses, traj_2_sync.num_poses)
        self.assertEqual(traj_2_sync.num_poses, 10)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 3,415 | 38.72093 | 79 | py |
evo | evo-master/test/test_trajectory.py | #!/usr/bin/env python
"""
unit test for trajectory module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import copy
import numpy as np
import helpers
from evo.core import trajectory
from evo.core import lie_algebra as lie
from evo.core.trajectory import PosePath3D, PoseTrajectory3D
from evo.core.geometry import GeometryException
class TestPosePath3D(unittest.TestCase):
    """Tests for trajectory.PosePath3D construction and operations."""

    def test_init_wrong_args(self):
        # a path needs either poses_se3 or both xyz and quaternions
        path = helpers.fake_path(10)
        # no args
        with self.assertRaises(trajectory.TrajectoryException):
            trajectory.PosePath3D()
        # only quaternion
        with self.assertRaises(trajectory.TrajectoryException):
            trajectory.PosePath3D(
                orientations_quat_wxyz=path.orientations_quat_wxyz)
        # only xyz
        with self.assertRaises(trajectory.TrajectoryException):
            trajectory.PosePath3D(positions_xyz=path.positions_xyz)

    def test_init_correct(self):
        # only poses_se3
        path = helpers.fake_path(10)
        try:
            trajectory.PosePath3D(poses_se3=path.poses_se3)
        except trajectory.TrajectoryException:
            self.fail("unexpected init failure with only poses_se3")
        # xyz + quaternion
        try:
            trajectory.PosePath3D(path.positions_xyz,
                                  path.orientations_quat_wxyz)
        except trajectory.TrajectoryException:
            self.fail("unexpected init failure with xyz + quaternion")
        # all
        try:
            trajectory.PosePath3D(path.positions_xyz,
                                  path.orientations_quat_wxyz, path.poses_se3)
        except trajectory.TrajectoryException:
            self.fail(
                "unexpected init failure with xyz + quaternion + poses_se3")

    def test_equals(self):
        # deep copy compares equal, a different random path does not
        path_1 = helpers.fake_path(10)
        path_1_copy = copy.deepcopy(path_1)
        path_2 = helpers.fake_path(15)
        self.assertTrue(path_1 == path_1_copy)
        self.assertFalse(path_1 == path_2)
        self.assertTrue(path_1 != path_2)
        self.assertFalse(path_1 != path_1_copy)

    def test_equals_equivalent_quaternion(self):
        """
        Checks that paths with equivalent quaternions (q = -q)
        are treated as equal.
        """
        path_1 = helpers.fake_path(10)
        path_2 = PosePath3D(path_1.positions_xyz,
                            path_1.orientations_quat_wxyz * -1)
        self.assertFalse(
            np.allclose(path_1.orientations_quat_wxyz,
                        path_2.orientations_quat_wxyz))
        self.assertEqual(path_1, path_2)

    def test_reduce_to_ids(self):
        path = helpers.fake_path(10)
        path_reduced = copy.deepcopy(path)
        path_reduced.reduce_to_ids([0, 2])
        self.assertEqual(path_reduced.num_poses, 2)
        # direct connection from 0 to 2 in initial should be reduced path length
        len_initial_segment = np.linalg.norm(path.positions_xyz[2] -
                                             path.positions_xyz[0])
        len_reduced = path_reduced.path_length
        self.assertAlmostEqual(len_initial_segment, len_reduced)

    def test_transform(self):
        # a rigid SE(3) transform must leave the path length unchanged
        path = helpers.fake_path(10)
        path_transformed = copy.deepcopy(path)
        t = lie.random_se3()
        path_transformed.transform(t)
        # traj_transformed.transform(lie.se3_inverse(t))
        self.assertAlmostEqual(path_transformed.path_length, path.path_length)

    def test_transform_sim3(self):
        # a Sim(3) transform with scale s must scale the path length by s
        path = helpers.fake_path(10)
        path_transformed = copy.deepcopy(path)
        t = lie.sim3(r=lie.random_so3(), t=np.ones(3), s=1.234)
        path_transformed.transform(t)
        self.assertAlmostEqual(path_transformed.path_length,
                               path.path_length * 1.234)

    def test_scale(self):
        # scaling by s must scale the path length by s
        path = helpers.fake_path(10)
        path_scaled = copy.deepcopy(path)
        s = 5.234
        path_scaled.scale(s)
        len_initial = path.path_length
        len_scaled = path_scaled.path_length
        self.assertAlmostEqual(len_initial * s, len_scaled)

    def test_check(self):
        self.assertTrue(helpers.fake_path(10).check()[0])
        # corrupt the cached quaternion data -> check() must fail
        path_wrong = helpers.fake_path(10)
        _ = path_wrong.orientations_quat_wxyz
        path_wrong._orientations_quat_wxyz[1][1] = 666
        self.assertFalse(path_wrong.check()[0])

    def test_get_infos(self):
        # smoke test: must not raise
        helpers.fake_path(10).get_infos()

    def test_get_statistics(self):
        # smoke test: must not raise
        helpers.fake_path(10).get_statistics()

    def test_distances(self):
        # cumulative distances: start at 0, one entry per pose,
        # last entry equals the total path length
        path = helpers.fake_path(10)
        self.assertEqual(path.distances[0], 0.0)
        self.assertEqual(path.distances.size, path.num_poses)
        self.assertAlmostEqual(path.distances[-1], path.path_length)
class TestPoseTrajectory3D(unittest.TestCase):
    """Tests for trajectory.PoseTrajectory3D (timestamped paths)."""

    def test_equals(self):
        traj_1 = helpers.fake_trajectory(10, 1)
        traj_1_copy = copy.deepcopy(traj_1)
        traj_2 = helpers.fake_trajectory(15, 1)
        self.assertTrue(traj_1 == traj_1_copy)
        self.assertFalse(traj_1 == traj_2)
        self.assertTrue(traj_1 != traj_2)
        self.assertFalse(traj_1 != traj_1_copy)

    def test_reduce_to_ids(self):
        # timestamps must be reduced together with the poses
        traj = helpers.fake_trajectory(10, 1)
        traj.reduce_to_ids([0, 2])
        self.assertEqual(traj.num_poses, 2)
        self.assertEqual(len(traj.timestamps), 2)

    def test_reduce_to_time_range(self):
        """
        Checks if a valid time-range reduces the trajectory correctly.
        """
        traj = helpers.fake_trajectory(10, 1)
        start = 1.2
        end = 7.8
        traj.reduce_to_time_range(start, end)
        # stamps 2..7 remain -> 6 poses, all within [start, end]
        self.assertEqual(traj.num_poses, 6)
        self.assertEqual(len(traj.timestamps), 6)
        self.assertGreaterEqual(traj.timestamps[0], start)
        self.assertLessEqual(traj.timestamps[-1], end)

    def test_reduce_to_empty_time_range(self):
        """
        A time-range that doesn't intersect should produce an empty trajectory.
        """
        traj = helpers.fake_trajectory(10, 1)
        start = 42
        end = 666
        traj.reduce_to_time_range(start, end)
        self.assertEqual(traj.num_poses, 0)
        self.assertEqual(len(traj.timestamps), 0)

    def test_check(self):
        self.assertTrue(helpers.fake_trajectory(10, 1).check()[0])
        # a non-monotonic timestamp must make check() fail
        wrong_traj = helpers.fake_trajectory(10, 1)
        wrong_traj.timestamps[0] = 666
        self.assertFalse(wrong_traj.check()[0])

    def test_get_infos(self):
        # smoke test: must not raise
        helpers.fake_trajectory(10, 1).get_infos()

    def test_get_statistics(self):
        # smoke test: must not raise
        helpers.fake_trajectory(10, 1).get_statistics()
class TestTrajectoryAlignment(unittest.TestCase):
    """Tests for Umeyama-style trajectory alignment methods."""

    def test_se3_alignment(self):
        # align() must undo an arbitrary rigid SE(3) transformation
        traj = helpers.fake_trajectory(1000, 1)
        traj_transformed = copy.deepcopy(traj)
        traj_transformed.transform(lie.random_se3())
        self.assertNotEqual(traj, traj_transformed)
        traj_transformed.align(traj)
        self.assertEqual(traj_transformed, traj)

    def test_sim3_alignment(self):
        # with correct_scale=True, align() must also recover the scale
        traj = helpers.fake_trajectory(1000, 1)
        traj_transformed = copy.deepcopy(traj)
        traj_transformed.transform(lie.random_se3())
        traj_transformed.scale(1.234)
        self.assertNotEqual(traj, traj_transformed)
        traj_transformed.align(traj, correct_scale=True)
        self.assertEqual(traj_transformed, traj)

    def test_scale_correction(self):
        # correct_only_scale must fix a pure scale difference
        traj = helpers.fake_trajectory(1000, 1)
        traj_transformed = copy.deepcopy(traj)
        traj_transformed.scale(1.234)
        self.assertNotEqual(traj, traj_transformed)
        traj_transformed.align(traj, correct_only_scale=True)
        self.assertEqual(traj_transformed, traj)

    def test_origin_alignment(self):
        # align_origin must make the first poses identical
        traj_1 = helpers.fake_trajectory(1000, 1)
        traj_2 = helpers.fake_trajectory(1000, 1)
        self.assertFalse(np.allclose(traj_1.poses_se3[0], traj_2.poses_se3[0]))
        traj_2.align_origin(traj_1)
        self.assertTrue(np.allclose(traj_1.poses_se3[0], traj_2.poses_se3[0]))

    def test_alignment_degenerate_case(self):
        # all poses identical -> alignment is underdetermined and must raise
        length = 100
        poses = [lie.random_se3()] * length
        traj_1 = PoseTrajectory3D(
            poses_se3=poses,
            timestamps=helpers.fake_timestamps(length, 1, 0.0))
        traj_2 = copy.deepcopy(traj_1)
        traj_2.transform(lie.random_se3())
        traj_2.scale(1.234)
        self.assertNotEqual(traj_1, traj_2)
        with self.assertRaises(GeometryException):
            traj_1.align(traj_2)
        with self.assertRaises(GeometryException):
            traj_1.align(traj_2, correct_scale=True)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 9,348 | 35.952569 | 80 | py |
evo | evo-master/test/traj_smoke_test.py | #!/usr/bin/env python
# Smoke test: run every evo_traj base command against each config file in the
# common config directory and in its format-specific config directory; any
# non-zero exit code fails the test.
import os
import glob
import shutil
import subprocess as sp
tmp_dir = "tmp"
common_cfg_dir = "cfg/traj/common"
here = os.path.dirname(os.path.abspath(__file__))
# always run in script location
os.chdir(here)
# maps an evo_traj base command to the directory of config files to pair it with
data = {
    "evo_traj euroc data/V102_groundtruth.csv --ref data/V102_groundtruth.csv": "cfg/traj/euroc",
    "evo_traj kitti data/KITTI_00_gt.txt data/KITTI_00_ORB.txt data/KITTI_00_SPTAM.txt "
    "--ref data/KITTI_00_gt.txt": "cfg/traj/kitti",
    "evo_traj tum data/fr2_desk_groundtruth.txt data/fr2_desk_ORB.txt data/fr2_desk_ORB_kf_mono.txt "
    "--ref data/fr2_desk_groundtruth.txt": "cfg/traj/tum",
    "evo_traj bag data/ROS_example.bag groundtruth S-PTAM ORB-SLAM --ref groundtruth": "cfg/traj/bag"
}
try:
    for d in data.keys():
        for cfg_dir in (common_cfg_dir, data[d]):
            for cfg in os.listdir(cfg_dir):
                # fresh scratch directory per run (presumably used as output
                # location by some configs -- TODO confirm)
                os.mkdir(tmp_dir)
                cfg = os.path.join(cfg_dir, cfg)
                cmd = "{} -c {}".format(d, cfg)
                print("[smoke test] {}".format(cmd))
                output = sp.check_output(cmd.split(" "), cwd=here)
                shutil.rmtree(tmp_dir)
except sp.CalledProcessError as e:
    # show the failing command's output before propagating the failure
    print(e.output.decode("utf-8"))
    raise
finally:
    # clean up trajectory files left behind in the test directory
    traj_files = glob.glob("./*.bag") + glob.glob("./*.kitti") + glob.glob(
        "./*.tum")
    for traj_file in traj_files:
        os.remove(traj_file)
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
| 1,473 | 32.5 | 101 | py |
evo | evo-master/test/demos/ape_demo.sh | #!/usr/bin/env bash
set -e # exit on error
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
# plotting is on by default, disabled when called with --no_plots
p="-p"
if [[ $* == *--no_plots* ]]; then
    p=
fi
log "minimal command"
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt
log "show more output and plot: -v or --verbose and -p or --plot"
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -v $p
log "align the trajectories"
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -va $p
log "use other pose_relation (e.g. angle) with -r or --pose_relation"
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -va $p --pose_relation angle_deg
log "save results"
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -va --save_results ORB_ape.zip
log "run multiple and save results"
log "first..."
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -a --save_results ORB_ape.zip
log "second..."
echo_and_run evo_ape kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_SPTAM.txt -a --save_results SPTAM_ape.zip | 1,350 | 35.513514 | 109 | sh |
evo | evo-master/test/demos/clean.sh | #!/usr/bin/env bash
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
# -I: ask for confirmation once before deleting the demo output files
rm -I *.bag *.kitti *.tum *.csv *.pdf *.zip *.pgf *.log *.aux *.json
| 204 | 24.625 | 68 | sh |
evo | evo-master/test/demos/config_demo.sh | #!/usr/bin/env bash
set -e # exit on error
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
log "show package settings"
echo_and_run evo_config show
# the cfg.json steps only run if such a file exists next to this script
if [ -e cfg.json ]; then
    log "show arbitrary .json config"
    echo_and_run evo_config show cfg.json
fi
log "set some package settings"
echo_and_run evo_config set plot_figsize 6 5 plot_usetex plot_fontfamily serif
if [ -e cfg.json ]; then
    log "set parameter of some arbitrary .json config"
    echo_and_run evo_config set -c cfg.json mode speed
fi
log "reset package settings to defaults"
echo_and_run evo_config reset
log "generate a .json config from arbitrary command line options"
echo_and_run evo_config generate --flag --number 2.5 --string plot.pdf --list 1 2.3 4 | 989 | 29 | 85 | sh |
evo | evo-master/test/demos/latex_demo.sh | #!/usr/bin/env bash
# set -e # exit on error
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
log "configure LaTeX-friendly settings"
echo_and_run evo_config set plot_figsize 5 5 plot_usetex plot_fontfamily serif plot_linewidth 0.5 plot_seaborn_style whitegrid
log "generate .pgf figures"
echo_and_run evo_res *rpe.zip --save_plot example.pgf
log "generate .pdf from .tex"
echo_and_run pdflatex example.tex
if [[ ! $* == *--no_plots* ]]; then
    evince example.pdf
fi
# auto-confirm the reset prompt
yes | evo_config reset
| 751 | 27.923077 | 126 | sh |
evo | evo-master/test/demos/res_demo.sh | #!/usr/bin/env bash
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
# plotting is on by default, disabled when called with --no_plots
p="-p"
if [[ $* == *--no_plots* ]]; then
    p=
fi
# pre-check (before set -e): the *ape.zip / *rpe.zip result files from the
# ape/rpe demos must exist, otherwise bail out with a hint
for m in ape rpe
do
    ls *$m.zip > /dev/null
    retcode=$?; if [ $retcode != 0 ]; then
        echo "missing files: "*$m.zip
        echo "run {ape, rpe}_demo.sh before this demo"
        exit 1
    else
        echo "found files for $m"
    fi
done
set -e # exit on error
for m in ape rpe
do
    log "load results from evo_${m}..."
    echo_and_run evo_res *"$m".zip
    log "load results from evo_$m and plot them"
    echo_and_run evo_res *"$m".zip $p
    log "load results from evo_$m and save plots in pdf"
    echo_and_run evo_res *"$m".zip --save_plot "$m".pdf
    log "load results from evo_$m and save stats in table"
    echo_and_run evo_res *"$m".zip --save_table "$m".csv
done
| 1,073 | 23.409091 | 76 | sh |
evo | evo-master/test/demos/rpe_demo.sh | #!/usr/bin/env bash
set -e # exit on error
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
# plotting is on by default, disabled when called with --no_plots
p="-p"
if [[ $* == *--no_plots* ]]; then
    p=
fi
log "minimal command (delta = 1 frame)"
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt
log "show more output and plot: -v or --verbose and -p or --plot"
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -v $p
log "use a different delta between the pose pairs:\n-d or --delta and -u or --delta_unit"
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -v -d 10 -u m $p
log "use other pose_relation (e.g. angle) with -r or --pose_relation"
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -v -d 10 -u m -r angle_deg $p
log "save results"
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -v -d 10 -u m --save_results ORB_rpe.zip
log "run multiple and save results"
log "first..."
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_ORB.txt -d 10 -u m --save_results ORB_rpe.zip
log "second..."
echo_and_run evo_rpe kitti ../data/KITTI_00_gt.txt ../data/KITTI_00_SPTAM.txt -d 10 -u m --save_results SPTAM_rpe.zip | 1,462 | 38.540541 | 117 | sh |
evo | evo-master/test/demos/traj_demo.sh | #!/usr/bin/env bash
set -e # exit on error
# printf "\033c" resets the output
function log { printf "\033c"; echo -e "\033[32m[$BASH_SOURCE] $1\033[0m"; }
# echo_and_run: print the command, wait for <Enter>, run it, wait again
function echo_and_run { echo -e "\$ $@" ; read input; "$@" ; read input; }
# always run in script directory
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
# plotting is on by default, disabled when called with --no_plots
p="-p"
if [[ $* == *--no_plots* ]]; then
    p=
fi
log "read single info"
echo_and_run evo_traj tum ../data/fr2_desk_ORB.txt
log "verbose mode: -v or --verbose"
echo_and_run evo_traj tum ../data/fr2_desk_ORB.txt -v
log "do a full check of the trajectory"
echo_and_run evo_traj tum ../data/fr2_desk_ORB.txt --full_check
log "load multiple trajectories"
echo_and_run evo_traj tum ../data/fr2_desk_*
log "plot trajectories: -p or --plot"
echo_and_run evo_traj tum ../data/fr2_desk_* --ref=../data/fr2_desk_groundtruth.txt --plot_mode xyz $p
log "align to reference to resolve mess: -a or --align"
echo_and_run evo_traj tum ../data/fr2_desk_* --ref=../data/fr2_desk_groundtruth.txt --plot_mode=xyz -a $p
log "additionally, scale for monocular trajectories"
echo_and_run evo_traj tum ../data/fr2_desk_* --ref=../data/fr2_desk_groundtruth.txt --plot_mode=xyz -as $p
log "save in other format - here: bagfile"
echo_and_run evo_traj tum ../data/fr2_desk_* --ref=../data/fr2_desk_groundtruth.txt -as --save_as_bag
log "plot bag contents"
echo_and_run evo_traj bag *.bag --all_topics --ref=fr2_desk_groundtruth $p
| 1,462 | 31.511111 | 106 | sh |
evo | evo-master/test/tum_benchmark_tools/README.md | These files are here just for comparing and testing.
They have been ported to also support Python 3.
Apart from that, the functionality is the same as in the original files.
Source: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
| 242 | 33.714286 | 72 | md |
evo | evo-master/test/tum_benchmark_tools/associate.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
The Kinect provides the color and depth images in an un-synchronized way. This means that the set of time stamps from the color images do not intersect with those of the depth images. Therefore, we need some way of associating color images to depth images.
For this purpose, you can use the ''associate.py'' script. It reads the time stamps from the rgb.txt file and the depth.txt file, and joins them by finding the best matches.
"""
import argparse
import sys
import os
import numpy
def read_file_list(filename):
    """
    Reads a trajectory from a text file.

    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp
    (to be matched) and "d1 d2 d3.." is arbitrary data (e.g., a 3D position and
    3D orientation) associated to this timestamp. Commas and tabs are accepted
    as separators; lines starting with '#' are treated as comments.

    Input:
    filename -- File name

    Output:
    dict -- dictionary of (stamp, data) tuples, with stamp as float and the
            remaining columns kept as strings
    """
    # Use a context manager so the file handle is always closed (the previous
    # version leaked it); also avoid shadowing the builtins 'file' and 'list'.
    with open(filename) as f:
        data = f.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    # Drop empty and comment lines, split the rest into non-empty tokens.
    rows = [[v.strip() for v in line.split(" ") if v.strip() != ""]
            for line in lines if len(line) > 0 and line[0] != "#"]
    # Rows need at least a stamp and one data column to be kept.
    return dict((float(row[0]), row[1:]) for row in rows if len(row) > 1)
def associate(first_list, second_list,offset,max_difference):
    """
    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
    to find the closest match for every input tuple.

    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
    """
    remaining_first = list(first_list.keys())
    remaining_second = list(second_list.keys())
    # All candidate pairs within the search radius, sorted by time difference
    # so the greedy pass below always consumes the best remaining pair first.
    candidates = sorted(
        (abs(a - (b + offset)), a, b)
        for a in remaining_first
        for b in remaining_second
        if abs(a - (b + offset)) < max_difference)
    matches = []
    for _, a, b in candidates:
        # Each stamp may be matched at most once.
        if a in remaining_first and b in remaining_second:
            remaining_first.remove(a)
            remaining_second.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
if __name__ == '__main__':
    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script takes two data files with timestamps and associates them
    ''')
    parser.add_argument('first_file', help='first text file (format: timestamp data)')
    parser.add_argument('second_file', help='second text file (format: timestamp data)')
    parser.add_argument('--first_only', help='only output associated lines from first file', action='store_true')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
    args = parser.parse_args()
    # read both files and associate their stamps within the given tolerance
    first_list = read_file_list(args.first_file)
    second_list = read_file_list(args.second_file)
    matches = associate(first_list, second_list,float(args.offset),float(args.max_difference))
    # print matched lines: either only the first file's data, or both
    if args.first_only:
        for a,b in matches:
            print("%f %s"%(a," ".join(first_list[a])))
    else:
        for a,b in matches:
| 5,346 | 40.449612 | 256 | py |
evo | evo-master/test/tum_benchmark_tools/evaluate_ate.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model,data):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn)
    data -- second trajectory (3xn)

    Output:
    rot -- rotation matrix (3x3)
    trans -- translation vector (3x1)
    trans_error -- translational error per point (1xn)
    """
    numpy.set_printoptions(precision=3,suppress=True)
    model_centered = model - model.mean(1)
    data_centered = data - data.mean(1)
    # Cross-covariance of the centered point sets; equals the transpose of the
    # accumulated column-wise outer products of (model, data).
    cov = data_centered * model_centered.transpose()
    U, d, Vh = numpy.linalg.linalg.svd(cov)
    S = numpy.matrix(numpy.identity(3))
    # Guard against a reflection instead of a proper rotation.
    if numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0:
        S[2, 2] = -1
    rot = U * S * Vh
    trans = data.mean(1) - rot * model.mean(1)
    aligned = rot * model + trans
    residual = aligned - data
    trans_error = numpy.sqrt(numpy.sum(numpy.multiply(residual, residual), 0)).A[0]
    return rot, trans, trans_error
def plot_traj(ax,stamps,traj,style,color,label):
    """
    Plot a trajectory using matplotlib.

    Input:
    ax -- the plot
    stamps -- time stamps (1xn), sorted in place
    traj -- trajectory (3xn)
    style -- line style
    color -- line color
    label -- plot legend (attached to the first plotted segment only)
    """
    stamps.sort()
    # Median time step; a gap larger than twice this value splits the line.
    interval = numpy.median([s - t for s, t in zip(stamps[1:], stamps[:-1])])
    xs = []
    ys = []
    last = stamps[0]
    for i in range(len(stamps)):
        if stamps[i] - last < 2 * interval:
            xs.append(traj[i][0])
            ys.append(traj[i][1])
        elif len(xs) > 0:
            # Gap detected: flush the current segment and start a new one.
            ax.plot(xs, ys, style, color=color, label=label)
            label = ""
            xs = []
            ys = []
        last = stamps[i]
    if len(xs) > 0:
        ax.plot(xs, ys, style, color=color, label=label)
if __name__=="__main__":
    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
    ''')
    parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
    parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
    parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
    parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
    parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
    args = parser.parse_args()
    # associate both trajectories by timestamp
    first_list = associate.read_file_list(args.first_file)
    second_list = associate.read_file_list(args.second_file)
    matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
    if len(matches)<2:
        sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
    # 3xn matrices of the matched (and optionally scaled) positions
    first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
    second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
    # align the estimated trajectory to the ground truth (Horn's method)
    rot,trans,trans_error = align(second_xyz,first_xyz)
    second_xyz_aligned = rot * second_xyz + trans
    # full (unmatched) trajectories, for saving/plotting
    first_stamps = list(first_list.keys())
    first_stamps.sort()
    first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
    second_stamps = list(second_list.keys())
    second_stamps.sort()
    second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
    second_xyz_full_aligned = rot * second_xyz_full + trans
    # print error statistics (only the RMSE unless --verbose is given)
    if args.verbose:
        print ("compared_pose_pairs %d pairs"%(len(trans_error)))
        print ("absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error)))
        print ("absolute_translational_error.mean %f m"%numpy.mean(trans_error))
        print ("absolute_translational_error.median %f m"%numpy.median(trans_error))
        print ("absolute_translational_error.std %f m"%numpy.std(trans_error))
        print ("absolute_translational_error.min %f m"%numpy.min(trans_error))
        print ("absolute_translational_error.max %f m"%numpy.max(trans_error))
    else:
        print ("%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error)))
    if args.save_associations:
        file = open(args.save_associations,"w")
        file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
        file.close()
    if args.save:
        file = open(args.save,"w")
        file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
        file.close()
    # optional plot, rendered off-screen via the Agg backend
    if args.plot:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.pylab as pylab
        from matplotlib.patches import Ellipse
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
        plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
        label="difference"
        for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
            ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
            label=""
        ax.legend()
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        plt.savefig(args.plot,dpi=90)
| 8,465 | 42.193878 | 189 | py |
evo | evo-master/test/tum_benchmark_tools/evaluate_rpe.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script computes the relative pose error from the ground truth trajectory
and the estimated trajectory.
"""
import argparse
import random
import numpy
import sys
_EPS = numpy.finfo(float).eps * 4.0
def transform44(l):
    """
    Generate a 4x4 homogeneous transformation matrix from a 3D point and unit quaternion.

    Input:
    l -- tuple consisting of (stamp,tx,ty,tz,qx,qy,qz,qw) where
         (tx,ty,tz) is the 3D position and (qx,qy,qz,qw) is the unit quaternion.

    Output:
    matrix -- 4x4 homogeneous transformation matrix
    """
    translation = l[1:4]
    quat = numpy.array(l[4:8], dtype=numpy.float64, copy=True)
    squared_norm = numpy.dot(quat, quat)
    # A near-zero quaternion cannot be normalized: fall back to a pure
    # translation with identity rotation.
    if squared_norm < numpy.finfo(numpy.float64).eps * 4.0:
        matrix = numpy.identity(4, dtype=numpy.float64)
        matrix[0, 3] = translation[0]
        matrix[1, 3] = translation[1]
        matrix[2, 3] = translation[2]
        return matrix
    # Scale so that the outer product below directly yields the rotation terms.
    quat *= numpy.sqrt(2.0 / squared_norm)
    qq = numpy.outer(quat, quat)
    return numpy.array((
        (1.0-qq[1, 1]-qq[2, 2],     qq[0, 1]-qq[2, 3],     qq[0, 2]+qq[1, 3], translation[0]),
        (    qq[0, 1]+qq[2, 3], 1.0-qq[0, 0]-qq[2, 2],     qq[1, 2]-qq[0, 3], translation[1]),
        (    qq[0, 2]-qq[1, 3],     qq[1, 2]+qq[0, 3], 1.0-qq[0, 0]-qq[1, 1], translation[2]),
        (                  0.0,                   0.0,                   0.0,            1.0)
        ), dtype=numpy.float64)
def read_trajectory(filename, matrix=True):
    """
    Read a trajectory from a text file.

    File format: "stamp tx ty tz qx qy qz qw" per line; commas and tabs are
    accepted as separators, lines starting with '#' are comments. Poses with
    an all-zero quaternion or with NaN values are skipped (NaNs with a warning
    on stderr).

    Input:
    filename -- file to be read
    matrix -- convert poses to 4x4 matrices (otherwise keep the raw 7-tuple)

    Output:
    dictionary of stamped 3D poses
    """
    # Use a context manager so the file handle is always closed (the previous
    # version leaked it); also avoid shadowing the builtins 'file' and 'list'.
    with open(filename) as f:
        data = f.read()
    lines = data.replace(","," ").replace("\t"," ").split("\n")
    rows = [[float(v.strip()) for v in line.split(" ") if v.strip() != ""]
            for line in lines if len(line) > 0 and line[0] != "#"]
    rows_ok = []
    for i, l in enumerate(rows):
        # An all-zero quaternion is invalid: drop the pose silently.
        if l[4:8] == [0, 0, 0, 0]:
            continue
        if any(numpy.isnan(v) for v in l):
            sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
            continue
        rows_ok.append(l)
    if matrix:
        return dict((l[0], transform44(l[0:])) for l in rows_ok)
    return dict((l[0], l[1:8]) for l in rows_ok)
def find_closest_index(L,t):
    """
    Find the index of the closest value in a sorted list.

    Input:
    L -- the list (assumed sorted ascending)
    t -- value to be found

    Output:
    index of the closest element
    """
    lo = 0
    hi = len(L)
    best_idx = 0
    best_diff = abs(L[0] - t)
    # Binary search that additionally tracks the closest element seen so far,
    # since t may fall between two entries.
    while lo < hi:
        mid = (lo + hi) // 2
        diff = abs(L[mid] - t)
        if diff < best_diff:
            best_diff = diff
            best_idx = mid
        if t == L[mid]:
            return mid
        if L[mid] > t:
            hi = mid
        else:
            lo = mid + 1
    return best_idx
def ominus(a,b):
    """
    Compute the relative 3D transformation between a and b.

    Input:
    a -- first pose (homogeneous 4x4 matrix)
    b -- second pose (homogeneous 4x4 matrix)

    Output:
    Relative 3D transformation from a to b, i.e. inv(a) * b.
    """
    a_inverse = numpy.linalg.inv(a)
    return numpy.dot(a_inverse, b)
def scale(a,scalar):
    """
    Scale the translational components of a 4x4 homogeneous matrix by a scale
    factor, leaving the rotation part and the homogeneous row untouched.
    Returns a new array; the input is not modified.
    """
    rows = [[a[r, 0], a[r, 1], a[r, 2], a[r, 3] * scalar] for r in range(3)]
    rows.append([a[3, 0], a[3, 1], a[3, 2], a[3, 3]])
    return numpy.array(rows)
def compute_distance(transform):
    """
    Compute the distance of the translational component of a 4x4 homogeneous
    matrix (Euclidean norm of the last column's first three entries).
    """
    translation = transform[0:3, 3]
    return numpy.linalg.norm(translation)
def compute_angle(transform):
    """
    Compute the rotation angle from a 4x4 homogeneous matrix.
    """
    # an invitation to 3-d vision, p 27: cos(angle) = (trace(R) - 1) / 2,
    # clamped to [-1, 1] to guard against numerical noise.
    cos_angle = (numpy.trace(transform[0:3, 0:3]) - 1) / 2
    return numpy.arccos(min(1, max(-1, cos_angle)))
def distances_along_trajectory(traj):
    """
    Compute the cumulative translational distances along a trajectory.

    Input:
    traj -- dictionary mapping timestamp -> 4x4 homogeneous pose matrix

    Output:
    list of cumulative distances, one entry per pose (first entry is 0)
    """
    # BUGFIX: dict.keys() returns a view in Python 3, which has no .sort();
    # use sorted() instead (works on both Python 2 and 3).
    stamps = sorted(traj.keys())
    motions = [ominus(traj[stamps[i + 1]], traj[stamps[i]])
               for i in range(len(stamps) - 1)]
    distances = [0]
    total = 0
    for motion in motions:
        total += compute_distance(motion)
        distances.append(total)
    return distances
def rotations_along_trajectory(traj,scale):
    """
    Compute the cumulative angular rotations along a trajectory.

    Input:
    traj -- dictionary mapping timestamp -> 4x4 homogeneous pose matrix
    scale -- factor applied to each angle (e.g. 180/pi to get degrees)

    Output:
    list of cumulative rotations, one entry per pose (first entry is 0)
    """
    # BUGFIX: dict.keys() returns a view in Python 3, which has no .sort();
    # use sorted() instead (works on both Python 2 and 3).
    stamps = sorted(traj.keys())
    motions = [ominus(traj[stamps[i + 1]], traj[stamps[i]])
               for i in range(len(stamps) - 1)]
    rotations = [0]
    total = 0
    for motion in motions:
        total += compute_angle(motion) * scale
        rotations.append(total)
    return rotations
def evaluate_trajectory(traj_gt,traj_est,param_max_pairs=10000,param_fixed_delta=False,param_delta=1.00,param_delta_unit="s",param_offset=0.00,param_scale=1.00):
    """
    Compute the relative pose error between two trajectories.

    Input:
    traj_gt -- the first trajectory (ground truth)
    traj_est -- the second trajectory (estimated trajectory)
    param_max_pairs -- number of relative poses to be evaluated
    param_fixed_delta -- false: evaluate over all possible pairs
                         true: only evaluate over pairs with a given distance (delta)
    param_delta -- distance between the evaluated pairs
    param_delta_unit -- unit for comparison:
                        "s": seconds
                        "m": meters
                        "rad": radians
                        "deg": degrees
                        "f": frames
    param_offset -- time offset between two trajectories (to model the delay)
    param_scale -- scale to be applied to the second trajectory

    Output:
    list of compared poses and the resulting translation and rotation error
    """
    stamps_gt = list(traj_gt.keys())
    stamps_est = list(traj_est.keys())
    stamps_gt.sort()
    stamps_est.sort()
    # Estimate the timestamp overlap between the trajectories with a
    # round-trip closest-stamp lookup; bail out if fewer than two estimated
    # stamps survive the round trip.
    stamps_est_return = []
    for t_est in stamps_est:
        t_gt = stamps_gt[find_closest_index(stamps_gt,t_est + param_offset)]
        t_est_return = stamps_est[find_closest_index(stamps_est,t_gt - param_offset)]
        t_gt_return = stamps_gt[find_closest_index(stamps_gt,t_est_return + param_offset)]
        if not t_est_return in stamps_est_return:
            stamps_est_return.append(t_est_return)
    if(len(stamps_est_return)<2):
        raise Exception("Number of overlap in the timestamps is too small. Did you run the evaluation on the right files?")
    # Build the index over which 'delta' is measured: time, path length,
    # accumulated rotation (rad/deg), or frame count.
    if param_delta_unit=="s":
        index_est = list(traj_est.keys())
        index_est.sort()
    elif param_delta_unit=="m":
        index_est = distances_along_trajectory(traj_est)
    elif param_delta_unit=="rad":
        index_est = rotations_along_trajectory(traj_est,1)
    elif param_delta_unit=="deg":
        index_est = rotations_along_trajectory(traj_est,180/numpy.pi)
    elif param_delta_unit=="f":
        index_est = range(len(traj_est))
    else:
        raise Exception("Unknown unit for delta: '%s'"%param_delta_unit)
    # Choose the pose pairs to compare: all pairs (or a random sample) when
    # delta is not fixed, otherwise pairs separated by param_delta.
    if not param_fixed_delta:
        if(param_max_pairs==0 or len(traj_est)<numpy.sqrt(param_max_pairs)):
            pairs = [(i,j) for i in range(len(traj_est)) for j in range(len(traj_est))]
        else:
            pairs = [(random.randint(0,len(traj_est)-1),random.randint(0,len(traj_est)-1)) for i in range(param_max_pairs)]
    else:
        pairs = []
        for i in range(len(traj_est)):
            j = find_closest_index(index_est,index_est[i] + param_delta)
            if j!=len(traj_est)-1:
                pairs.append((i,j))
        if(param_max_pairs!=0 and len(pairs)>param_max_pairs):
            pairs = random.sample(pairs,param_max_pairs)
    # Pairs whose matched ground-truth stamp is further away than twice the
    # median ground-truth interval are dropped below.
    gt_interval = numpy.median([s-t for s,t in zip(stamps_gt[1:],stamps_gt[:-1])])
    gt_max_time_difference = 2*gt_interval
    result = []
    for i,j in pairs:
        stamp_est_0 = stamps_est[i]
        stamp_est_1 = stamps_est[j]
        stamp_gt_0 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_0 + param_offset) ]
        stamp_gt_1 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_1 + param_offset) ]
        if(abs(stamp_gt_0 - (stamp_est_0 + param_offset)) > gt_max_time_difference or
           abs(stamp_gt_1 - (stamp_est_1 + param_offset)) > gt_max_time_difference):
            continue
        # RPE: difference between the (scaled) estimated relative motion and
        # the ground-truth relative motion over the same pair.
        error44 = ominus(  scale(
                           ominus( traj_est[stamp_est_1], traj_est[stamp_est_0] ),param_scale),
                           ominus( traj_gt[stamp_gt_1], traj_gt[stamp_gt_0] ) )
        trans = compute_distance(error44)
        rot = compute_angle(error44)
        result.append([stamp_est_0,stamp_est_1,stamp_gt_0,stamp_gt_1,trans,rot])
    if len(result)<2:
        raise Exception("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory!")
    return result
def percentile(seq,q):
    """
    Return the q-percentile of a list
    """
    # Nearest-rank style percentile: sort a copy, then index by the
    # (truncated) fractional position q in [0, 1].
    ordered = sorted(seq)
    return ordered[int((len(ordered) - 1) * q)]
if __name__ == '__main__':
    # Fix the RNG so that random pair downsampling in evaluate_trajectory
    # is reproducible across runs.
    random.seed(0)
    parser = argparse.ArgumentParser(description='''
    This script computes the relative pose error from the ground truth trajectory and the estimated trajectory.
    ''')
    parser.add_argument('groundtruth_file', help='ground-truth trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
    parser.add_argument('estimated_file', help='estimated trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
    parser.add_argument('--max_pairs', help='maximum number of pose comparisons (default: 10000, set to zero to disable downsampling)', default=10000)
    parser.add_argument('--fixed_delta', help='only consider pose pairs that have a distance of delta delta_unit (e.g., for evaluating the drift per second/meter/radian)', action='store_true')
    parser.add_argument('--delta', help='delta for evaluation (default: 1.0)',default=1.0)
    parser.add_argument('--delta_unit', help='unit of delta (options: \'s\' for seconds, \'m\' for meters, \'rad\' for radians, \'f\' for frames; default: \'s\')',default='s')
    parser.add_argument('--offset', help='time offset between ground-truth and estimated trajectory (default: 0.0)',default=0.0)
    parser.add_argument('--scale', help='scaling factor for the estimated trajectory (default: 1.0)',default=1.0)
    parser.add_argument('--save', help='text file to which the evaluation will be saved (format: stamp_est0 stamp_est1 stamp_gt0 stamp_gt1 trans_error rot_error)')
    parser.add_argument('--plot', help='plot the result to a file (requires --fixed_delta, output format: png)')
    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the mean translational error measured in meters will be printed)', action='store_true')
    args = parser.parse_args()

    # Plotting over time only makes sense for fixed-delta pairs.
    if args.plot and not args.fixed_delta:
        sys.exit("The '--plot' option can only be used in combination with '--fixed_delta'")

    traj_gt = read_trajectory(args.groundtruth_file)
    traj_est = read_trajectory(args.estimated_file)

    # Each result row is: [stamp_est0, stamp_est1, stamp_gt0, stamp_gt1, trans_err, rot_err]
    result = evaluate_trajectory(traj_gt,
                                 traj_est,
                                 int(args.max_pairs),
                                 args.fixed_delta,
                                 float(args.delta),
                                 args.delta_unit,
                                 float(args.offset),
                                 float(args.scale))

    # Convert once instead of rebuilding the array for every column slice.
    result_array = numpy.array(result)
    stamps = result_array[:, 0]
    trans_error = result_array[:, 4]
    rot_error = result_array[:, 5]

    if args.save:
        # Context manager guarantees the file is closed even if a write fails.
        with open(args.save, "w") as f:
            f.write("\n".join([" ".join(["%f"%v for v in line]) for line in result]))

    if args.verbose:
        print("compared_pose_pairs %d pairs"%(len(trans_error)))

        print("translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error)))
        print("translational_error.mean %f m"%numpy.mean(trans_error))
        print("translational_error.median %f m"%numpy.median(trans_error))
        print("translational_error.std %f m"%numpy.std(trans_error))
        print("translational_error.min %f m"%numpy.min(trans_error))
        print("translational_error.max %f m"%numpy.max(trans_error))

        # Rotational errors are computed in radians; report in degrees.
        print("rotational_error.rmse %f deg"%(numpy.sqrt(numpy.dot(rot_error,rot_error) / len(rot_error)) * 180.0 / numpy.pi))
        print("rotational_error.mean %f deg"%(numpy.mean(rot_error) * 180.0 / numpy.pi))
        print("rotational_error.median %f deg"%(numpy.median(rot_error) * 180.0 / numpy.pi))
        print("rotational_error.std %f deg"%(numpy.std(rot_error) * 180.0 / numpy.pi))
        print("rotational_error.min %f deg"%(numpy.min(rot_error) * 180.0 / numpy.pi))
        print("rotational_error.max %f deg"%(numpy.max(rot_error) * 180.0 / numpy.pi))
    else:
        # Default output: just the mean translational error in meters.
        print(numpy.mean(trans_error))

    if args.plot:
        # Select a non-interactive backend before pyplot is imported so the
        # script works on headless machines.
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(stamps - stamps[0],trans_error,'-',color="blue")
        #ax.plot([t for t,e in err_rot],[e for t,e in err_rot],'-',color="red")
        ax.set_xlabel('time [s]')
        ax.set_ylabel('translational error [m]')
        plt.savefig(args.plot,dpi=300)
| 15,436 | 38.992228 | 192 | py |
                    GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
| 35,149 | 51.074074 | 78 | md |
null | qimera-main/README.md | # Qimera: Data-free Quantization with Synthetic Boundary Supporting Samples [NeurIPS 2021]
This repository is the official implementation of paper [Qimera: Data-free Quantization with Synthetic Boundary Supporting Samples].
[[Paper](https://arxiv.org/abs/2111.02625)] [[Slide](https://acsys.yonsei.ac.kr/papers/qimera_slides.pdf)]

## Requirements
* Python 3.6
* PyTorch 1.8.1
* Refer to `requirements.txt` for the remaining requirements
To install requirements:
```setup
pip install -r requirements.txt
```
## Training
For ImageNet training, change the path of the validation set in the .hocon file.
To train the models described in the paper, run one of these commands:
```train
./run_cifar10_4bit.sh
./run_cifar100_4bit.sh
./run_imgnet_resnet18_4bit.sh
./run_imgnet_resnet50_4bit.sh
./run_imgnet_mobilenet_v2_4bit.sh
```
Refer to the ```other_train_scripts``` folder for the 5-bit settings.
## Evaluation
To evaluate trained model, run the command below after training:
```eval
./eval_cifar10_4bit.sh
./eval_cifar100_4bit.sh
./eval_imgnet_resnet18_4bit.sh
./eval_imgnet_resnet50_4bit.sh
./eval_imgnet_mobilenet_v2_4bit.sh
```
## Visualizing Feature Space
Feature space visualization of real or synthetic images described in Figure 3.
```visualize
python experiments.py --pca_source
python experiments.py --gdfq_generator_path GENERATOR_WEIGHT_PATH --pca_gdfq --image_gdfq
python experiments.py --qimera_generator_path GENERATOR_WEIGHT_PATH --pca_qimera --pca_mix --pca_path --image_gdfq --image_mix
```
## Results
Our models achieve the following performance:
| Dataset | Model | Teacher Net Accuracy | 4bit Quantized Model Accuracy | 5bit Quantized Model Accuracy |
| --------------- |-------------|---------------- | -------------- |-------------- |
| Cifar-10 | ResNet-20 | 93.89% | 91.26% | 93.46% |
| Cifar-100 | ResNet-20 | 70.33% | 65.10% | 69.02% |
| ImageNet | ResNet-18 | 71.47% | 63.84% | 69.29% |
| ImageNet | ResNet-50 | 77.73% | 66.25% | 75.32% |
| ImageNet | MobileNetV2 | 73.03% | 61.62% | 70.45% |
Generated Synthetic Images for Cifar10 :
</br>

## License
This project is licensed under the terms of the GNU General Public License v3.0
## Citation
```
@inproceedings{choi2021qimera,
title={Qimera: Data-free Quantization with Synthetic Boundary Supporting Samples},
author={Choi, Kanghyun and Hong, Deokki and Park, Noseong and Kim, Youngsok and Lee, Jinho},
booktitle={Thirty-Fifth Conference on Neural Information Processing Systems},
year={2021}
}
```
| 2,769 | 30.477273 | 133 | md |
null | qimera-main/conditional_batchnorm.py | # -*- coding: utf-8 -*-
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class ConditionalBatchNorm2d(nn.BatchNorm2d):
    """Batch normalization whose affine transform is supplied per call.

    The layer is built with ``affine=False`` by default; instead of a single
    learned (weight, bias) pair, the caller passes the scale and shift
    tensors explicitly on every forward pass, which makes the normalization
    "conditional" on external information (e.g. a class label).
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1,
                 affine=False, track_running_stats=True):
        super(ConditionalBatchNorm2d, self).__init__(
            num_features, eps, momentum, affine, track_running_stats
        )

    def forward(self, input, weight, bias, **kwargs):
        """Normalize ``input`` and apply the caller-provided scale/shift.

        ``weight``/``bias`` may be 1-D (shared across the batch) or 2-D
        (one row per sample); both are broadcast over the spatial dims.
        """
        self._check_input_dim(input)

        # Mirror nn.BatchNorm2d: pick the EMA factor used for updating the
        # running statistics while training.
        ema_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:
                # cumulative moving average
                ema_factor = 1.0 / self.num_batches_tracked.item()
            else:
                # exponential moving average
                ema_factor = self.momentum

        use_batch_stats = self.training or not self.track_running_stats
        normalized = F.batch_norm(
            input, self.running_mean, self.running_var,
            self.weight, self.bias,
            use_batch_stats, ema_factor, self.eps,
        )

        # Promote per-batch 1-D parameters to a leading batch axis of one ...
        if weight.dim() == 1:
            weight = weight.unsqueeze(0)
        if bias.dim() == 1:
            bias = bias.unsqueeze(0)
        # ... then broadcast the (B, C) parameters over H and W.
        scale = weight[:, :, None, None].expand_as(normalized)
        shift = bias[:, :, None, None].expand_as(normalized)
        return scale * normalized + shift
class CategoricalConditionalBatchNorm2d(ConditionalBatchNorm2d):
    """Conditional batch norm with one learned (scale, shift) pair per class.

    The per-class affine parameters are stored in embedding tables and looked
    up by the integer class index ``c`` at forward time.
    """

    def __init__(self, num_classes, num_features, eps=1e-5, momentum=0.1,
                 affine=False, track_running_stats=True):
        super(CategoricalConditionalBatchNorm2d, self).__init__(
            num_features, eps, momentum, affine, track_running_stats
        )
        # One row per class: row c holds that class's scale / shift vector.
        self.weights = nn.Embedding(num_classes, num_features)
        self.biases = nn.Embedding(num_classes, num_features)
        self._initialize()

    def _initialize(self):
        # Start as the identity affine transform: scale 1, shift 0.
        init.ones_(self.weights.weight.data)
        init.zeros_(self.biases.weight.data)

    def forward(self, input, c, linear=None, **kwargs):
        """Normalize ``input`` with the affine parameters of class ``c``.

        When ``linear`` is given, ``c`` holds several labels per sample and
        ``linear`` their mixing coefficients; the looked-up parameters are
        combined with a coefficient-weighted mean over the label axis.
        """
        weight = self.weights(c)
        bias = self.biases(c)
        # `is not None` instead of `!= None`: identity check is the correct
        # (and tensor-safe) way to test for the optional argument.
        if linear is not None:
            weight = (weight * linear.unsqueeze(2)).mean(dim=1)
            bias = (bias * linear.unsqueeze(2)).mean(dim=1)
        return super(CategoricalConditionalBatchNorm2d, self).forward(input, weight, bias)
# Manual smoke test: exercises F.batch_norm with externally supplied weights
# and then the two conditional-BN classes above. Run this module directly to
# see the printed results; nothing here is used by the rest of the project.
if __name__ == '__main__':
    """Forward computation check."""
    import torch
    size = (3, 3, 12, 12)
    batch_size, num_features = size[:2]
    print('# Affirm embedding output')
    naive_bn = nn.BatchNorm2d(3)
    idx_input = torch.tensor([1, 2, 0], dtype=torch.long)
    embedding = nn.Embedding(3, 3)
    weights = embedding(idx_input)
    print('# weights size', weights.size())
    # Fresh running statistics (mean 0, var 1) for the functional call below.
    empty = torch.tensor((), dtype=torch.float)
    running_mean = empty.new_zeros((3,))
    running_var = empty.new_ones((3,))
    naive_bn_W = naive_bn.weight
    # print('# weights from embedding | type {}\n'.format(type(weights)), weights)
    # print('# naive_bn_W | type {}\n'.format(type(naive_bn_W)), naive_bn_W)
    input = torch.rand(*size, dtype=torch.float32)
    print('input size', input.size())
    print('input ndim ', input.dim())
    _ = naive_bn(input)
    print('# batch_norm with given weights')
    # First attempt: feed the (batch, features) embedding output directly as
    # the batch_norm weight argument and report whether that is accepted.
    try:
        with torch.no_grad():
            output = F.batch_norm(input, running_mean, running_var,
                                  weights, naive_bn.bias, False, 0.0, 1e-05)
    except Exception as e:
        print("\tFailed to use given weights")
        print('# Error msg:', e)
        print()
    else:
        print("Succeeded to use given weights")
    print('\n# Batch norm before use given weights')
    # Second attempt: normalize first, then apply the per-sample weights by
    # broadcasting them over the spatial dimensions.
    with torch.no_grad():
        tmp_out = F.batch_norm(input, running_mean, running_var,
                               naive_bn_W, naive_bn.bias, False, .0, 1e-05)
    weights_cast = weights.unsqueeze(-1).unsqueeze(-1)
    weights_cast = weights_cast.expand(tmp_out.size())
    try:
        out = weights_cast * tmp_out
    except Exception:
        print("Failed")
    else:
        print("Succeeded!")
        print('\t {}'.format(out.size()))
    print(type(tuple(out.size())))
    print('--- condBN and catCondBN ---')
    # Sanity-check the class-conditional variant end to end.
    catCondBN = CategoricalConditionalBatchNorm2d(3, 3)
    output = catCondBN(input, idx_input)
    assert tuple(output.size()) == size
    condBN = ConditionalBatchNorm2d(3)
    idx = torch.tensor([1], dtype=torch.long)
    out = catCondBN(input, idx)
    print('cat cond BN weights\n', catCondBN.weights.weight.data)
| 4,783 | 33.417266 | 90 | py |
null | qimera-main/dataloader.py | """
data loder for loading data
"""
import os
import math
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import struct
__all__ = ["DataLoader", "PartDataLoader"]
class ImageLoader(data.Dataset):
    """Dataset of tensors saved on disk, laid out as ``dataset_dir/<class>/<file>``.

    Each immediate sub-directory name is parsed as a float class label, and
    every regular file inside it is treated as one sample previously written
    with ``torch.save``.
    """

    def __init__(self, dataset_dir, transform=None, target_transform=None):
        """Index all (file_path, [label]) pairs under ``dataset_dir``."""
        datasets = []
        for cla in os.listdir(dataset_dir):
            cla_path = os.path.join(dataset_dir, cla)
            for file_name in os.listdir(cla_path):
                file_path = os.path.join(cla_path, file_name)
                if os.path.isfile(file_path):
                    # The directory name doubles as the (single) label.
                    datasets.append((file_path, [float(cla)]))
        self.dataset_dir = dataset_dir
        self.datasets = datasets
        # NOTE(review): transform/target_transform are stored but never applied
        # in __getitem__ — kept only for interface compatibility.
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        """Return ``(noise_tensor, label_tensor)`` for the given index."""
        file_path, label = self.datasets[index]
        # Always deserialize onto the CPU so loader workers never touch the GPU.
        noise = torch.load(file_path, map_location=torch.device('cpu'))
        return noise, torch.Tensor(label)

    def __len__(self):
        return len(self.datasets)
class DataLoader(object):
    """Factory for evaluation data loaders (CIFAR-10/100 and ImageNet).

    Only the *test* split is ever materialized; the training loader is always
    ``None`` because this project is data-free — training images come from a
    generator instead of the original training set.
    """

    def __init__(self, dataset, batch_size, n_threads=4,
                 ten_crop=False, data_path='/home/dataset/', logger=None):
        """
        Create the data loader for a specific data set.

        :param dataset: one of "cifar10", "cifar100", "imagenet"
        :param batch_size: test batch size (used by ImageNet; CIFAR uses 200)
        :param n_threads: number of worker threads to load data, default: 4
        :param ten_crop: use ten crop for testing, default: False
        :param data_path: path to data set, default: /home/dataset/
        :param logger: logger for progress messages (must not be None)
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.n_threads = n_threads
        self.ten_crop = ten_crop
        self.data_path = data_path
        self.logger = logger
        self.dataset_root = data_path
        self.logger.info("|===>Creating data loader for " + self.dataset)
        if self.dataset in ["cifar100","cifar10"]:
            self.train_loader, self.test_loader = self.cifar(
                dataset=self.dataset)
        elif self.dataset in ["imagenet"]:
            self.train_loader, self.test_loader = self.imagenet(
                dataset=self.dataset)
        else:
            assert False, "invalid data set"

    def getloader(self):
        """
        Return the (train_loader, test_loader) pair; train_loader is None.
        """
        return self.train_loader, self.test_loader

    def imagenet(self, dataset="imagenet"):
        """Build the ImageNet validation loader (standard 256-resize /
        224-center-crop evaluation pipeline)."""
        # NOTE(review): traindir is computed but unused here — the training
        # split is never loaded in the data-free setting.
        traindir = os.path.join(self.data_path, "train")
        testdir = os.path.join(self.data_path, "val")
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        train_loader = None
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
        test_loader = torch.utils.data.DataLoader(
            dsets.ImageFolder(testdir, test_transform),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.n_threads,
            pin_memory=False)
        return train_loader, test_loader

    def cifar(self, dataset="cifar100"):
        """Build the CIFAR-10/100 test loader (downloads the set if absent).

        Uses a fixed evaluation batch size of 200 regardless of
        ``self.batch_size``.
        """
        if dataset == "cifar10":
            norm_mean = [0.49139968, 0.48215827, 0.44653124]
            norm_std = [0.24703233, 0.24348505, 0.26158768]
        elif dataset == "cifar100":
            norm_mean = [0.50705882, 0.48666667, 0.44078431]
            norm_std = [0.26745098, 0.25568627, 0.27607843]
        else:
            assert False, "Invalid cifar dataset"
        test_data_root = self.dataset_root
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std)])
        if self.dataset == "cifar10":
            test_dataset = dsets.CIFAR10(root=test_data_root,
                                         train=False,
                                         transform=test_transform,download=True)
        elif self.dataset == "cifar100":
            test_dataset = dsets.CIFAR100(root=test_data_root,
                                          train=False,
                                          transform=test_transform,
                                          download=True)
        else:
            assert False, "invalid data set"
        test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                  batch_size=200,
                                                  shuffle=False,
                                                  pin_memory=True,
                                                  num_workers=self.n_threads)
        return None, test_loader
| 4,226 | 26.627451 | 72 | py |
null | qimera-main/eval_cifar100_4bit.sh | #!/bin/bash
python main.py --conf_path ./cifar100_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 10 --id 01 --eval | 124 | 40.666667 | 111 | sh |
null | qimera-main/eval_cifar10_4bit.sh | #!/bin/bash
python main.py --conf_path ./cifar10_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 2 --id 01 --randemb --eval | 132 | 43.333333 | 119 | sh |
null | qimera-main/eval_imgnet_mobilenet_v2_4bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_mobilenet_v2.hocon --multi_label_prob 0.4 --multi_label_num 500 --id 01 --randemb --eval | 139 | 45.666667 | 126 | sh |
null | qimera-main/eval_imgnet_resnet18_4bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_resnet18.hocon --multi_label_prob 0.4 --multi_label_num 100 --id 01 --eval | 125 | 41 | 112 | sh |
null | qimera-main/eval_imgnet_resnet50_4bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_resnet50.hocon --multi_label_prob 0.7 --multi_label_num 500 --id 01 --eval | 125 | 41 | 112 | sh |
null | qimera-main/experiments.py | import os
import argparse
import numpy as np
import pandas as pd
from scipy.linalg import eigh
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, transforms
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from pytorchcv.model_provider import get_model
from generator import GeneratorGDFQ, GeneratorQimera
# Command-line flags choose which PCA plots and image grids to produce; the
# *_generator_path arguments point to pretrained generator checkpoints.
parser = argparse.ArgumentParser(description='PCA features & generate images')
parser.add_argument('--pca_source', action='store_true')
parser.add_argument('--pca_gdfq', action='store_true')
parser.add_argument('--pca_qimera', action='store_true')
parser.add_argument('--pca_mix', action='store_true')
parser.add_argument('--pca_path', action='store_true')
# Number of interpolation samples per class pair in the mix / path plots.
parser.add_argument('--num_dot_per_mix', type=int, default=200)
parser.add_argument('--num_dot_per_path', type=int, default=200)
parser.add_argument('--image_gdfq', action='store_true')
parser.add_argument('--image_qimera', action='store_true')
parser.add_argument('--image_mix', action='store_true')
parser.add_argument('--gdfq_generator_path', type=str)
parser.add_argument('--qimera_generator_path', type=str)
def reduce_df(dataframe, num_per_class, num_classes=10, class_size=5000):
    """Keep only the first ``num_per_class`` rows of each class block.

    Assumes ``dataframe`` is ordered by class label in contiguous blocks of
    ``class_size`` rows each (CIFAR-10 train: 10 classes x 5000 samples, the
    previous hard-coded behavior and still the default).

    :param dataframe: label-sorted DataFrame
    :param num_per_class: rows to keep from the start of each class block
    :param num_classes: number of contiguous class blocks (default 10)
    :param class_size: rows per class block (default 5000)
    :return: the concatenated, reduced DataFrame
    """
    chunks = [
        dataframe.iloc[c * class_size: c * class_size + num_per_class]
        for c in range(num_classes)
    ]
    return pd.concat(chunks)
class GeneratorGDFQ(nn.Module):
    """GDFQ image generator for 32x32 (CIFAR-10) synthesis.

    A class embedding is multiplied element-wise with the latent noise,
    projected to an 8x8x128 feature map, upsampled twice (8 -> 16 -> 32)
    through conv blocks, and finally passed through a non-affine batch norm.
    """

    def __init__(self, options=None, conf_path=None):
        super(GeneratorGDFQ, self).__init__()
        # `options` / `conf_path` are accepted for interface parity; this
        # local copy hard-codes the CIFAR-10 configuration.
        self.label_emb = nn.Embedding(10, 100)
        self.init_size = 32 // 4  # spatial size before the two 2x upsamplings
        self.l1 = nn.Sequential(nn.Linear(100, 128 * self.init_size ** 2))
        self.conv_blocks0 = nn.Sequential(
            nn.BatchNorm2d(128),
        )
        self.conv_blocks1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.conv_blocks2 = nn.Sequential(
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, 3, stride=1, padding=1),
            nn.Tanh(),
            nn.BatchNorm2d(3, affine=False)
        )

    def forward(self, z, labels):
        """Map (noise, integer labels) to a batch of 3x32x32 images."""
        conditioned = self.label_emb(labels) * z  # element-wise conditioning
        feat = self.l1(conditioned).view(-1, 128, self.init_size, self.init_size)
        feat = self.conv_blocks0(feat)
        feat = self.conv_blocks1(nn.functional.interpolate(feat, scale_factor=2))
        feat = self.conv_blocks2(nn.functional.interpolate(feat, scale_factor=2))
        return feat
class GeneratorQimera(nn.Module):
    """Qimera generator: (class embedding + noise) -> 3x32x32 image.

    Supports superposed embeddings: when ``linear`` is given to ``forward``,
    ``labels`` holds several classes per sample and ``linear`` their mixing
    coefficients. This local definition shadows the one imported from
    ``generator`` at the top of the file.
    """

    def __init__(self, options=None, conf_path=None, teacher_weight=None, freeze=True, fc_reduce=False):
        super(GeneratorQimera, self).__init__()
        self.fc_reduce = fc_reduce
        # `is None` instead of `== None`: identity check is tensor-safe.
        if teacher_weight is None:
            self.label_emb = nn.Embedding(10, 64)
        else:
            # Reuse the teacher's classifier weights as class embeddings.
            self.label_emb = nn.Embedding.from_pretrained(teacher_weight, freeze=freeze)
        # Per-sample normalization of embedding+noise (no affine parameters,
        # always uses batch statistics).
        self.embed_normalizer = nn.BatchNorm1d(self.label_emb.weight.T.shape, affine=False, track_running_stats=False)
        if fc_reduce:
            # Dimension-matching layer: projects the embedding down to the
            # generator's 64-dim latent space.
            self.fc_reducer = nn.Linear(in_features=self.label_emb.weight.shape[-1], out_features=64)
        self.init_size = 32 // 4  # spatial size before the two 2x upsamplings
        self.l1 = nn.Sequential(nn.Linear(64, 128 * self.init_size ** 2))
        self.conv_blocks0 = nn.Sequential(
            nn.BatchNorm2d(128),
        )
        self.conv_blocks1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.conv_blocks2 = nn.Sequential(
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, 3, stride=1, padding=1),
            nn.Tanh(),
            nn.BatchNorm2d(3, affine=False)
        )

    def forward(self, z, labels, linear=None, z2=None):
        """Generate images from noise ``z`` and ``labels``.

        :param z: noise, (B, 64) for single labels or (B, K, 64) with mixing
        :param labels: (B,) class ids, or (B, K) when ``linear`` is given
        :param linear: optional (B, K) mixing coefficients for superposed
            embeddings
        :param z2: unused, kept for interface compatibility
        """
        if linear is None:
            # Single label per sample: add noise to its embedding, normalize.
            gen_input = self.embed_normalizer(torch.add(self.label_emb(labels), z).T).T
            if self.fc_reduce:
                # BUGFIX: this used to read the undefined name `embed_norm`
                # (NameError) and the reducer output never reached gen_input.
                gen_input = self.fc_reducer(gen_input)
        else:
            # Multiple labels per sample, combined by `linear` coefficients.
            embed_norm = self.embed_normalizer(torch.add(self.label_emb(labels), z).T).T
            if self.fc_reduce:
                embed_norm = self.fc_reducer(embed_norm)
            gen_input = (embed_norm * linear.unsqueeze(2)).sum(dim=1)
        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks0(out)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks1(img)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks2(img)
        return img
# Experiment driver: fits a 2-D PCA on the teacher's penultimate features of
# real CIFAR-10 data, projects synthetic samples from the GDFQ and Qimera
# generators into that space, and/or saves image grids. Requires CUDA.
if __name__ == '__main__':
    args = parser.parse_args()

    # Load the pretrained generator checkpoints that the requested tasks need.
    if True in [args.pca_gdfq, args.image_gdfq]:
        generator_GDFQ = GeneratorGDFQ().cuda()
        generator_GDFQ.load_state_dict(torch.load(args.gdfq_generator_path, map_location="cuda:0"))
        generator_GDFQ.eval()
    if True in [args.pca_qimera, args.pca_mix, args.pca_path, args.image_qimera, args.image_mix]:
        generator_qimera = GeneratorQimera().cuda()
        generator_qimera.load_state_dict(torch.load(args.qimera_generator_path, map_location="cuda:0"))
        generator_qimera.eval()

    # Any PCA task: fit a feature scaler and the top-2 PCA directions on the
    # teacher's features of the real CIFAR-10 training set.
    if True in [args.pca_source, args.pca_gdfq, args.pca_qimera, args.pca_mix, args.pca_path]:
        os.makedirs('./pca_results', exist_ok=True)
        net = get_model("resnet20_cifar10", pretrained=True)
        net = net.cuda()
        net.eval()
        print('teacher network is ready')
        train_set = datasets.CIFAR10('./', train=True, download=True, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))
        ]))
        test_set = datasets.CIFAR10('./', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))
        ]))
        train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=64, shuffle=False)
        test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=128, shuffle=False)

        # Sanity check: report the teacher's test accuracy.
        correct = 0
        total = 0
        with torch.no_grad():
            for i, (data, target) in enumerate(test_loader):
                data, target = data.cuda(), target.cuda()
                output = net(data)
                _, pred = output.max(1)
                correct += (pred == target).sum()
                total += len(target)
        print(f'teacher net accuracy: {correct / total * 100 : .3f}')

        # Collect penultimate-layer features of the whole training set.
        features = []
        targets = []
        with torch.no_grad():
            for i, (data, target) in enumerate(train_loader):
                data = data.cuda()
                _, feature = net(data, out_feature=True)
                features.append(feature.cpu())
                targets.append(target)
        features_cat = torch.cat(features, dim=0)
        targets_cat = torch.cat(targets, dim=0)
        feature_scaler = StandardScaler().fit(features_cat)
        standardized_data = feature_scaler.transform(features_cat)
        # Eigvals (62, 63) of the 64x64 covariance matrix = the two largest
        # eigenvectors, i.e. the first two principal components.
        covar_matrix = np.matmul(standardized_data.T , standardized_data)
        values, vectors = eigh(covar_matrix, eigvals=(62,63))
        vectors = vectors.T

    # Figure: PCA of real training images (1000 samples per class).
    if args.pca_source:
        print('pca of source data')
        new_coordinates = np.matmul(vectors, standardized_data.T)
        new_coordinates = np.vstack((new_coordinates, targets_cat)).T
        dataframe = pd.DataFrame(data=new_coordinates, columns=("1st_principal", "2nd_principal", "label"))
        dataframe.sort_values(by=['label'], axis=0, inplace=True)
        df = reduce_df(dataframe, 1000)
        pca_result = sns.FacetGrid(df, hue="label", height=10, hue_kws={'marker':['x'] * 10}).map(plt.scatter, '1st_principal', '2nd_principal')
        pca_result.set(xticks=[], yticks=[], xlabel='', ylabel='')
        plt.savefig('./pca_results/pca_source.png')

    # Figure: PCA of images synthesized by the GDFQ generator.
    if args.pca_gdfq:
        print('pca of GDFQ')
        features_GDFQ = []
        targets_GDFQ = []
        with torch.no_grad():
            # 50 batches of 200 images; class switches every 5 batches (i // 5),
            # giving 1000 samples per class.
            for i in range(50):
                z = torch.randn(200, 100).cuda()
                labels = (torch.ones(200) * (i // 5)).type(torch.LongTensor)
                targets_GDFQ.append(labels)
                labels = labels.cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                images = generator_GDFQ(z, labels)
                _, feature = net(images, out_feature=True)
                features_GDFQ.append(feature.cpu())
        features_cat_GDFQ = torch.cat(features_GDFQ, dim=0)
        targets_cat_GDFQ = torch.cat(targets_GDFQ, dim=0)
        standardized_data_GDFQ = feature_scaler.transform(features_cat_GDFQ)
        new_coordinates_GDFQ = np.matmul(vectors, standardized_data_GDFQ.T)
        new_coordinates_GDFQ = np.vstack((new_coordinates_GDFQ, targets_cat_GDFQ)).T
        dataframe_GDFQ = pd.DataFrame(data=new_coordinates_GDFQ, columns=("1st_principal", "2nd_principal", "label"))
        pca_result = sns.FacetGrid(dataframe_GDFQ, hue="label", height=10, hue_kws={'marker':['x'] * 10}).map(plt.scatter, '1st_principal', '2nd_principal')
        pca_result.set(xticks=[], yticks=[], xlabel='', ylabel='')
        plt.savefig('./pca_results/pca_gdfq.png')

    # Qimera figures share the single-label feature extraction below.
    if args.pca_qimera or args.pca_mix or args.pca_path:
        print('pca of Qimera')
        features_qimera = []
        targets_qimera = []
        with torch.no_grad():
            for i in range(50):
                z = torch.randn(200, 64).cuda()
                labels = (torch.ones(200) * (i // 5)).type(torch.LongTensor)
                targets_qimera.append(labels)
                labels = labels.cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                images = generator_qimera(z, labels)
                _, feature = net(images, out_feature=True)
                features_qimera.append(feature.cpu())
        features_cat_qimera = torch.cat(features_qimera, dim=0)
        targets_cat_qimera = torch.cat(targets_qimera, dim=0)
        standardized_data_qimera = feature_scaler.transform(features_cat_qimera)
        new_coordinates_qimera = np.matmul(vectors, standardized_data_qimera.T)
        new_coordinates_qimera = np.vstack((new_coordinates_qimera, targets_cat_qimera)).T
        dataframe_qimera = pd.DataFrame(data=new_coordinates_qimera, columns=("1st_principal", "2nd_principal", "label"))
        if args.pca_qimera:
            pca_result = sns.FacetGrid(dataframe_qimera, hue="label", height=10, hue_kws={'marker':['x'] * 10}).map(plt.scatter, '1st_principal', '2nd_principal')
            pca_result.set(xticks=[], yticks=[], xlabel='', ylabel='')
            plt.savefig('./pca_results/pca_qimera.png')
        if args.pca_mix or args.pca_path:
            print('pca of embedding superposing')
            # Mixing coefficients sweeping linearly between the two labels.
            linear = []
            for i in range(args.num_dot_per_mix):
                linear.append(torch.tensor([[i / args.num_dot_per_mix, 1 - (i / args.num_dot_per_mix)]]))
            linear = torch.cat(linear, dim=0)
            features_qimera_mix = []
            targets_qimera_mix = []
            with torch.no_grad():
                # Every unordered class pair (i, j); label 300 marks mixed
                # samples so they can be styled separately in the plot.
                for i in range(10):
                    for j in range(i + 1, 10):
                        z = torch.randn(args.num_dot_per_mix, 2, 64).cuda()
                        z = z.contiguous()
                        labels = torch.tensor([[i, j]])
                        labels = torch.cat([labels] * args.num_dot_per_mix, dim=0).cuda()
                        target = torch.cat([torch.tensor([300])] * args.num_dot_per_mix, dim=0)
                        targets_qimera_mix.append(target)
                        l = linear.cuda()
                        images = generator_qimera(z, labels, l)
                        _, feature = net(images, out_feature=True)
                        features_qimera_mix.append(feature.cpu())
            features_cat_qimera_mix = torch.cat(features_qimera_mix, dim=0)
            targets_cat_qimera_mix = torch.cat(targets_qimera_mix, dim=0)
            standardized_data_qimera_mix = feature_scaler.transform(features_cat_qimera_mix)
            new_coordinates_qimera_mix = np.matmul(vectors, standardized_data_qimera_mix.T)
            new_coordinates_qimera_mix = np.vstack((new_coordinates_qimera_mix, targets_cat_qimera_mix)).T
            dataframe_qimera_mix = pd.DataFrame(data=new_coordinates_qimera_mix, columns=("1st_principal", "2nd_principal", "label"))
            if args.pca_mix:
                # Mixed samples (label 300) drawn in black over the 10 classes.
                pca_result = sns.FacetGrid(
                    pd.concat([dataframe_qimera_mix, dataframe_qimera]),
                    hue="label",
                    hue_order=[300.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] ,
                    height=10,
                    hue_kws={'marker':['x'] * 11, 'color': ['black'] + sns.color_palette("tab10")[:10]}
                ).map(plt.scatter, '1st_principal', '2nd_principal')
                pca_result.set(xticks=[], yticks=[], xlabel='', ylabel='')
                plt.savefig('./pca_results/pca_qimera_mix.png')
            if args.pca_path:
                print('pca of path')
                # Noise-free interpolation path between each embedding pair.
                linear = []
                for i in range(args.num_dot_per_path):
                    linear.append(torch.tensor([[i / args.num_dot_per_path, 1 - (i / args.num_dot_per_path)]]))
                linear = torch.cat(linear, dim=0)
                features_no_noise = []
                targets_no_noise = []
                with torch.no_grad():
                    for i in range(10):
                        for j in range(i + 1, 10):
                            z = torch.zeros(args.num_dot_per_path, 2, 64).cuda()
                            z = z.contiguous()
                            labels = torch.tensor([[i, j]])
                            labels = torch.cat([labels] * args.num_dot_per_path, dim=0).cuda()
                            target = torch.tensor([(i + 1) * 10 + j])
                            target = torch.cat([target] * args.num_dot_per_path, dim=0)
                            targets_no_noise.append(target)
                            l = linear.cuda()
                            images = generator_qimera(z, labels, l)
                            _, feature = net(images, out_feature=True)
                            features_no_noise.append(feature.cpu())
                features_cat_no_noise = torch.cat(features_no_noise, dim=0)
                targets_cat_no_noise = torch.cat(targets_no_noise, dim=0)
                standardized_data_no_noise = feature_scaler.transform(features_cat_no_noise)
                new_coordinates_no_noise = np.matmul(vectors, standardized_data_no_noise.T)
                new_coordinates_no_noise = np.vstack((new_coordinates_no_noise, targets_cat_no_noise)).T
                dataframe_no_noise = pd.DataFrame(data=new_coordinates_no_noise, columns=("1st_principal", "2nd_principal", "label"))
                # Eleven evenly spaced waypoints, drawn larger in the plots.
                MID_DOT_NUM = 11
                linear = []
                for i in range(MID_DOT_NUM):
                    linear.append(torch.tensor([[i/(MID_DOT_NUM - 1), 1 - (i/(MID_DOT_NUM - 1))]]))
                linear = torch.cat(linear, dim=0)
                features_no_noise_ten = []
                targets_no_noise_ten = []
                with torch.no_grad():
                    for i in range(10):
                        for j in range(i + 1, 10):
                            z = torch.zeros(MID_DOT_NUM, 2, 64).cuda()
                            z = z.contiguous()
                            labels = torch.tensor([[i, j]])
                            labels = torch.cat([labels] * MID_DOT_NUM, dim=0).cuda()
                            target = torch.tensor([(i + 1) * 10 + j + 500])
                            target = torch.cat([target] * MID_DOT_NUM, dim=0)
                            targets_no_noise_ten.append(target)
                            l = linear.cuda()
                            images = generator_qimera(z, labels, l)
                            _, feature = net(images, out_feature=True)
                            features_no_noise_ten.append(feature.cpu())
                features_cat_no_noise_ten = torch.cat(features_no_noise_ten, dim=0)
                targets_cat_no_noise_ten = torch.cat(targets_no_noise_ten, dim=0)
                color = sns.color_palette("tab10")
                # NOTE(review): the *_total tensors and the *_start offsets
                # below are computed but never read afterwards in this file —
                # confirm before removing.
                features_total = torch.cat([features_cat_qimera, features_cat_no_noise, features_cat_qimera_mix, features_cat_no_noise_ten], dim=0)
                targets_total = torch.cat([targets_cat_qimera, targets_cat_no_noise, targets_cat_qimera_mix, targets_cat_no_noise_ten], dim=0)
                no_noise_start = len(targets_cat_qimera)
                mix_start = no_noise_start + len(targets_cat_no_noise)
                no_noise_ten_start = mix_start + len(targets_cat_qimera_mix)
                standardized_data_total = feature_scaler.transform(features_total)
                # One plot per adjacent class pair (j == i + 1), each with a
                # local PCA fitted on just the samples shown in that plot.
                x = 0
                for i in range(10):
                    for j in range(i + 1, 10):
                        if j - i == 1:
                            selected_features = torch.cat([
                                features_cat_qimera[i * 1000: (i + 1) * 1000],
                                features_cat_qimera[j * 1000: (j + 1) * 1000],
                                features_cat_no_noise[x * args.num_dot_per_path: (x + 1) * args.num_dot_per_path],
                                features_cat_qimera_mix[x * args.num_dot_per_mix: (x + 1) * args.num_dot_per_mix],
                                features_cat_no_noise_ten[x * MID_DOT_NUM: (x + 1) * MID_DOT_NUM]
                            ] , dim=0)
                            selected_targets = torch.cat([
                                targets_cat_qimera[i * 1000: (i + 1) * 1000],
                                targets_cat_qimera[j * 1000: (j + 1) * 1000],
                                targets_cat_no_noise[x * args.num_dot_per_path: (x + 1) * args.num_dot_per_path],
                                targets_cat_qimera_mix[x * args.num_dot_per_mix: (x + 1) * args.num_dot_per_mix],
                                targets_cat_no_noise_ten[x * MID_DOT_NUM: (x + 1) * MID_DOT_NUM]
                            ], dim=0)
                            standardized_data_selected = feature_scaler.transform(selected_features)
                            covar_matrix_selected = np.matmul(standardized_data_selected.T , standardized_data_selected)
                            values_selected, vectors_selected = eigh(covar_matrix_selected, eigvals=(62,63))
                            vectors_selected = vectors_selected.T
                            new_coordinates_selected = np.matmul(vectors_selected, standardized_data_selected.T)
                            new_coordinates_selected = np.vstack((new_coordinates_selected, selected_targets)).T
                            df_selected = pd.DataFrame(data=new_coordinates_selected, columns=("1st_principal", "2nd_principal", "label"))
                            pca_result = sns.FacetGrid(df_selected, hue="label", height=10, hue_kws={'marker':['o', 'o', 'o', 'o', 'o'], 's':[30, 30, 30, 30, 300], 'color':[color[i], color[j], 'black', 'lightgreen', 'black'],}).map(plt.scatter, '1st_principal', '2nd_principal')
                            pca_result.set(xticks=[], yticks=[], xlabel='', ylabel='')
                            plt.savefig(f'pca_results/pca_path_{i}_{j}.png')
                        # x indexes the (i, j) pair order used when the feature
                        # tensors were built, so advance it for every pair.
                        x += 1

    # Image-grid outputs.
    if True in [args.image_gdfq, args.image_qimera, args.image_mix]:
        os.makedirs('./generated_images', exist_ok=True)
    if args.image_gdfq:
        print('generate images with GDFQ')
        images_GDFQ = []
        with torch.no_grad():
            # Same 10 noise vectors reused for every class row.
            z_GDFQ = torch.randn(10, 100).cuda()
            z_GDFQ = z_GDFQ.contiguous()
            for i in range(10):
                labels = (torch.ones(10) * i).type(torch.LongTensor).cuda()
                labels = labels.contiguous()
                image_GDFQ = generator_GDFQ(z_GDFQ, labels)
                images_GDFQ.append(image_GDFQ)
        images_GDFQ = torch.cat(images_GDFQ, dim=0)
        torchvision.utils.save_image(images_GDFQ, './generated_images/GDFQ.png', nrow=10)
    if args.image_qimera:
        print('generate images with Qimera')
        images_qimera = []
        with torch.no_grad():
            z_qimera = torch.randn(10, 64).cuda()
            z_qimera = z_qimera.contiguous()
            for i in range(10):
                labels = (torch.ones(10) * i).type(torch.LongTensor).cuda()
                labels = labels.contiguous()
                image_qimera = generator_qimera(z_qimera, labels)
                images_qimera.append(image_qimera)
        images_qimera = torch.cat(images_qimera, dim=0)
        torchvision.utils.save_image(images_qimera, './generated_images/qimera.png', nrow=10)
    if args.image_mix:
        print('generate images with Qimera & embedding superposing')
        images_qimera_multilabel = []
        with torch.no_grad():
            z = torch.randn(1, 64).cuda()
            z = z.contiguous()
            # 10x10 grid: every ordered label pair mixed with equal weights.
            for i in range(10):
                for j in range(10):
                    labels = torch.tensor([[i, j]]).cuda()
                    labels = labels.contiguous()
                    linear = torch.nn.functional.softmax(torch.ones(1, 2),dim=1).cuda()
                    images = generator_qimera(z, labels, linear)
                    images_qimera_multilabel.append(images)
        images = torch.cat(images_qimera_multilabel, dim=0)
        torchvision.utils.save_image(images, f'./generated_images/qimera_mix.png', nrow=10)
| 23,289 | 46.921811 | 282 | py |
null | qimera-main/main.py | import argparse
import datetime
import logging
import os
import time
import traceback
import sys
import copy
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.nn as nn
from torch import pca_lowrank
# option file should be modified according to your expriment
from options import Option
from dataloader import DataLoader
from trainer import Trainer
import utils as utils
from quantization_utils.quant_modules import *
from pytorchcv.model_provider import get_model as ptcv_get_model
from conditional_batchnorm import CategoricalConditionalBatchNorm2d
class Generator(nn.Module):
    """CIFAR-scale conditional image generator for data-free quantization.

    A class label is mapped to an embedding (optionally initialised from the
    teacher's final FC weights), perturbed with scaled Gaussian noise,
    feature-normalised, and decoded into an image by an upsampling CNN.

    Args:
        options: pre-built Option object; if falsy, one is parsed from conf_path.
        conf_path: path to a hocon config, used only when options is falsy.
        teacher_weight: optional (nClasses, latent_dim) tensor to initialise
            the label embedding from; None trains a fresh embedding.
        freeze: whether a teacher-initialised embedding stays frozen.
    """
    def __init__(self, options=None, conf_path=None, teacher_weight=None, freeze=True):
        super(Generator, self).__init__()
        self.settings = options or Option(conf_path)

        # Fix: was `teacher_weight==None`; identity comparison is the correct
        # idiom, especially since teacher_weight may be a tensor.
        if teacher_weight is None:
            self.label_emb = nn.Embedding(self.settings.nClasses, self.settings.latent_dim)
        else:
            self.label_emb = nn.Embedding.from_pretrained(teacher_weight, freeze=freeze)

        # Normalises each embedding dimension over the batch. NOTE: the first
        # argument is a torch.Size rather than an int; this is harmless only
        # because affine=False and track_running_stats=False mean num_features
        # is never used to allocate tensors.
        self.embed_normalizer = nn.BatchNorm1d(self.label_emb.weight.T.shape,affine=False,track_running_stats=False)

        if not self.settings.no_DM:
            # Disentanglement mapping: project the embedding to a smaller
            # intermediate space before decoding.
            self.fc_reducer = nn.Linear(in_features=self.label_emb.weight.shape[-1], out_features=self.settings.intermediate_dim)
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.intermediate_dim, 128 * self.init_size ** 2))
        else:
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.latent_dim, 128 * self.init_size ** 2))

        self.conv_blocks0 = nn.Sequential(
            nn.BatchNorm2d(128),
        )
        self.conv_blocks1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            # 0.8 is passed positionally as eps (unusually large) -- kept as-is.
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.conv_blocks2 = nn.Sequential(
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, self.settings.channels, 3, stride=1, padding=1),
            nn.Tanh(),
            nn.BatchNorm2d(self.settings.channels, affine=False)
        )

    def forward(self, z, labels, linear=None, z2=None):
        """Decode noisy label embeddings into images.

        Args:
            z: noise, (B, latent_dim) for single labels, or (B, K, latent_dim)
                when superposing K labels per sample.
            labels: class indices, (B,) or (B, K).
            linear: optional (B, K) mixing weights; when given, the K embeddings
                of each sample are blended into one before decoding.
            z2: unused; kept for interface compatibility.

        Returns:
            Generated images of shape (B, channels, img_size, img_size).
        """
        # Fix: was `linear == None`; equality against None on a tensor relies
        # on tensor.__eq__ fallback behaviour -- identity is the safe test.
        if linear is None:
            gen_input = self.embed_normalizer(torch.add(self.label_emb(labels),self.settings.noise_scale*z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(gen_input)
        else:
            embed_norm = self.embed_normalizer(torch.add(self.label_emb(labels),self.settings.noise_scale*z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(embed_norm)
            else:
                gen_input = embed_norm
            # Weighted sum over the K superposed label embeddings.
            gen_input = (gen_input * linear.unsqueeze(2)).sum(dim=1)

        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks0(out)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks1(img)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks2(img)
        return img
class Generator_imagenet(nn.Module):
    """ImageNet-scale conditional generator (224x224 pipeline).

    Same label-embedding + noise + decoder design as Generator, but the batch
    norms in the decoder are categorical-conditional (one set of affine
    parameters per class), so labels are threaded through every block.

    Args:
        options: pre-built Option object; if falsy, one is parsed from conf_path.
        conf_path: path to a hocon config, used only when options is falsy.
        teacher_weight: optional (nClasses, latent_dim) tensor to initialise
            the label embedding from; None trains a fresh embedding.
        freeze: whether a teacher-initialised embedding stays frozen.
    """
    def __init__(self, options=None, conf_path=None, teacher_weight=None, freeze=True):
        super(Generator_imagenet, self).__init__()
        # Fix: the original assigned self.settings both before and after
        # super().__init__(); the redundant pre-super assignment is removed.
        self.settings = options or Option(conf_path)

        # Fix: was `teacher_weight==None` -- use identity comparison.
        if teacher_weight is None:
            self.label_emb = nn.Embedding(self.settings.nClasses, self.settings.latent_dim)
        else:
            self.label_emb = nn.Embedding.from_pretrained(teacher_weight, freeze=freeze)

        # num_features here is a torch.Size; harmless only because affine and
        # track_running_stats are both disabled.
        self.embed_normalizer = nn.BatchNorm1d(self.label_emb.weight.T.shape,affine=False,track_running_stats=False)

        if not self.settings.no_DM:
            # Disentanglement mapping before decoding.
            self.fc_reducer = nn.Linear(in_features=self.label_emb.weight.shape[-1], out_features=self.settings.intermediate_dim)
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.intermediate_dim, 128 * self.init_size ** 2))
        else:
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.latent_dim, 128 * self.init_size ** 2))

        # Class-conditional batch norms (1000 ImageNet classes).
        self.conv_blocks0_0 = CategoricalConditionalBatchNorm2d(1000, 128)

        self.conv_blocks1_0 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        # 0.8 is passed positionally as eps -- kept as-is.
        self.conv_blocks1_1 = CategoricalConditionalBatchNorm2d(1000, 128, 0.8)
        self.conv_blocks1_2 = nn.LeakyReLU(0.2, inplace=True)

        self.conv_blocks2_0 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.conv_blocks2_1 = CategoricalConditionalBatchNorm2d(1000, 64, 0.8)
        self.conv_blocks2_2 = nn.LeakyReLU(0.2, inplace=True)
        self.conv_blocks2_3 = nn.Conv2d(64, self.settings.channels, 3, stride=1, padding=1)
        self.conv_blocks2_4 = nn.Tanh()
        self.conv_blocks2_5 = nn.BatchNorm2d(self.settings.channels, affine=False)

    def forward(self, z, labels, linear=None):
        """Decode noisy label embeddings into images.

        Args:
            z: noise, (B, latent_dim) or (B, K, latent_dim) when superposing.
            labels: class indices, (B,) or (B, K).
            linear: optional (B, K) mixing weights for superposed labels.

        Returns:
            Generated images of shape (B, channels, img_size, img_size).
        """
        # Fix: was `linear == None` -- use identity comparison.
        if linear is None:
            gen_input = self.embed_normalizer(torch.add(self.label_emb(labels),z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(gen_input)
        else:
            embed_norm = self.embed_normalizer(torch.add(self.label_emb(labels),z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(embed_norm)
            else:
                gen_input = embed_norm
            # Weighted sum over the K superposed label embeddings.
            gen_input = (gen_input * linear.unsqueeze(2)).sum(dim=1)

        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks0_0(out, labels, linear=linear)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks1_0(img)
        img = self.conv_blocks1_1(img, labels, linear=linear)
        img = self.conv_blocks1_2(img)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks2_0(img)
        img = self.conv_blocks2_1(img, labels, linear=linear)
        img = self.conv_blocks2_2(img)
        img = self.conv_blocks2_3(img)
        img = self.conv_blocks2_4(img)
        img = self.conv_blocks2_5(img)
        return img
class ExperimentDesign:
    """End-to-end harness for one quantization experiment.

    Builds data loaders, the full-precision teacher and the quantized student,
    wires them into a Trainer, and drives the training/evaluation loop with
    best-checkpoint saving.
    """
    def __init__(self, generator=None, options=None, conf_path=None):
        self.settings = options or Option(conf_path)
        self.generator = generator
        self.train_loader = None
        self.test_loader = None
        self.model = None
        self.model_teacher = None

        self.optimizer_state = None
        self.trainer = None
        self.start_epoch = 0
        self.test_input = None

        self.unfreeze_Flag = True
        # Pin GPU visibility before any CUDA initialisation happens.
        os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = self.settings.visible_devices

        self.settings.set_save_path()
        self.logger = self.set_logger()
        self.settings.paramscheck(self.logger)

        self.prepare()

    def set_logger(self):
        """Create the shared 'baseline' logger writing to both a file in the
        save path and stdout."""
        logger = logging.getLogger('baseline')
        file_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
        console_formatter = logging.Formatter('%(message)s')
        # file log
        file_handler = logging.FileHandler(os.path.join(self.settings.save_path, "train_test.log"))
        file_handler.setFormatter(file_formatter)
        # console log
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(console_formatter)

        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        logger.setLevel(logging.INFO)
        return logger

    def prepare(self):
        """Build everything in dependency order: GPU, data, models,
        quantization replacement, trainer."""
        self._set_gpu()
        self._set_dataloader()
        self._set_model()
        self._replace()
        self.logger.info(self.model)
        self._set_trainer()

    def _set_gpu(self):
        # Seed both CPU and CUDA RNGs for reproducibility.
        torch.manual_seed(self.settings.manualSeed)
        torch.cuda.manual_seed(self.settings.manualSeed)
        assert self.settings.GPU <= torch.cuda.device_count() - 1, "Invalid GPU ID"
        cudnn.benchmark = True

    def _set_dataloader(self):
        # create data loader
        data_loader = DataLoader(dataset=self.settings.dataset,
                                 batch_size=self.settings.batchSize,
                                 data_path=self.settings.dataPath,
                                 n_threads=self.settings.nThreads,
                                 ten_crop=self.settings.tenCrop,
                                 logger=self.logger)

        self.train_loader, self.test_loader = data_loader.getloader()

    def _set_model(self):
        """Instantiate student and teacher from pytorchcv; the teacher is the
        same pretrained network and stays in eval mode."""
        if self.settings.dataset in ["cifar100","cifar10"]:
            if self.settings.network in ["resnet20_cifar100","resnet20_cifar10"]:
                self.test_input = Variable(torch.randn(1, 3, 32, 32).cuda())
                self.model = ptcv_get_model(self.settings.network, pretrained=True)
                self.model_teacher = ptcv_get_model(self.settings.network, pretrained=True)
                self.model_teacher.eval()
            else:
                assert False, "unsupport network: " + self.settings.network

        elif self.settings.dataset in ["imagenet"]:
            if self.settings.network in ["resnet18","resnet50","mobilenetv2_w1"]:
                self.test_input = Variable(torch.randn(1, 3, 224, 224).cuda())
                self.model = ptcv_get_model(self.settings.network, pretrained=True)
                self.model_teacher = ptcv_get_model(self.settings.network, pretrained=True)
                self.model_teacher.eval()
            else:
                assert False, "unsupport network: " + self.settings.network

        else:
            assert False, "unsupport data set: " + self.settings.dataset

    def _set_trainer(self):
        # set lr master
        lr_master_S = utils.LRPolicy(self.settings.lr_S,
                                     self.settings.nEpochs,
                                     self.settings.lrPolicy_S)
        lr_master_G = utils.LRPolicy(self.settings.lr_G,
                                     self.settings.nEpochs,
                                     self.settings.lrPolicy_G)
        params_dict_S = {
            'step': self.settings.step_S,
            'decay_rate': self.settings.decayRate_S
        }
        params_dict_G = {
            'step': self.settings.step_G,
            'decay_rate': self.settings.decayRate_G
        }

        lr_master_S.set_params(params_dict=params_dict_S)
        lr_master_G.set_params(params_dict=params_dict_G)

        # set trainer
        self.trainer = Trainer(
            model=self.model,
            model_teacher=self.model_teacher,
            generator = self.generator,
            train_loader=self.train_loader,
            test_loader=self.test_loader,
            lr_master_S=lr_master_S,
            lr_master_G=lr_master_G,
            settings=self.settings,
            logger=self.logger,
            opt_type=self.settings.opt_type,
            optimizer_state=self.optimizer_state,
            run_count=self.start_epoch)

    def quantize_model(self,model):
        """
        Recursively quantize a pretrained single-precision model to int8 quantized model
        model: pretrained single-precision model

        Conv2d/Linear layers are replaced by Quant_* wrappers with
        self.settings.qw weight bits; ReLU/ReLU6 activations get a trailing
        QuantAct with self.settings.qa activation bits. All other modules are
        deep-copied and recursed into attribute by attribute.
        """
        weight_bit = self.settings.qw
        act_bit = self.settings.qa

        # quantize convolutional and linear layers
        if type(model) == nn.Conv2d:
            quant_mod = Quant_Conv2d(weight_bit=weight_bit)
            quant_mod.set_param(model)
            return quant_mod
        elif type(model) == nn.Linear:
            quant_mod = Quant_Linear(weight_bit=weight_bit)
            quant_mod.set_param(model)
            return quant_mod

        # quantize all the activation
        elif type(model) == nn.ReLU or type(model) == nn.ReLU6:
            return nn.Sequential(*[model, QuantAct(activation_bit=act_bit)])

        # recursively use the quantized module to replace the single-precision module
        elif type(model) == nn.Sequential:
            mods = []
            for n, m in model.named_children():
                mods.append(self.quantize_model(m))
            return nn.Sequential(*mods)
        else:
            q_model = copy.deepcopy(model)
            # Walk dir(model) and replace child modules in the copy; attributes
            # containing 'norm' are skipped so BN layers are left untouched.
            for attr in dir(model):
                mod = getattr(model, attr)
                if isinstance(mod, nn.Module) and 'norm' not in attr:
                    setattr(q_model, attr, self.quantize_model(mod))
            return q_model

    def _replace(self):
        # Swap the full-precision student for its quantized counterpart.
        self.model = self.quantize_model(self.model)

    def freeze_model(self,model):
        """
        freeze the activation range

        Recursively calls fix() on every QuantAct so activation ranges stop
        being updated (used during evaluation).
        """
        if type(model) == QuantAct:
            model.fix()
        elif type(model) == nn.Sequential:
            for n, m in model.named_children():
                self.freeze_model(m)
        else:
            for attr in dir(model):
                mod = getattr(model, attr)
                if isinstance(mod, nn.Module) and 'norm' not in attr:
                    self.freeze_model(mod)
            return model

    def unfreeze_model(self,model):
        """
        unfreeze the activation range

        Recursively calls unfix() on every QuantAct so activation ranges are
        re-calibrated (used during the first training epochs).
        """
        if type(model) == QuantAct:
            model.unfix()
        elif type(model) == nn.Sequential:
            for n, m in model.named_children():
                self.unfreeze_model(m)
        else:
            for attr in dir(model):
                mod = getattr(model, attr)
                if isinstance(mod, nn.Module) and 'norm' not in attr:
                    self.unfreeze_model(mod)
            return model

    def run(self):
        """Run the full training loop, tracking and checkpointing the best
        top-1/top-5 error seen; returns (best_top1, best_top5) as errors."""
        best_top1 = 100
        best_top5 = 100
        start_time = time.time()

        # Sanity-check the teacher's accuracy before training starts.
        test_error, test_loss, test5_error = self.trainer.test_teacher(0)

        best_ep = 0
        try:
            for epoch in range(self.start_epoch, self.settings.nEpochs):
                self.epoch = epoch
                self.start_epoch = 0

                # Only the first 4 epochs re-calibrate activation ranges;
                # afterwards ranges stay frozen during training too.
                if epoch < 4:
                    print ("\n self.unfreeze_model(self.model)\n")
                    self.unfreeze_model(self.model)

                train_error, train_loss, train5_error = self.trainer.train(epoch=epoch)
                self.freeze_model(self.model)

                # For ImageNet, skip expensive evaluation during warmup epochs.
                if self.settings.dataset in ["cifar100","cifar10"]:
                    test_error, test_loss, test5_error = self.trainer.test(epoch=epoch)
                elif self.settings.dataset in ["imagenet"]:
                    if epoch > self.settings.warmup_epochs - 2:
                        test_error, test_loss, test5_error = self.trainer.test(epoch=epoch)
                    else:
                        test_error = 100
                        test5_error = 100
                else:
                    assert False, "invalid data set"

                # Checkpoint whenever top-1 error ties or improves the best.
                if best_top1 >= test_error:
                    best_ep = epoch+1
                    best_top1 = test_error
                    best_top5 = test5_error
                    print('Saving a best checkpoint ...')
                    torch.save(self.trainer.model.state_dict(),f"{self.settings.ckpt_path}/student_model_{self.settings.dataset}-{self.settings.network}-w{self.settings.qw}_a{self.settings.qa}.pt")
                    torch.save(self.trainer.generator.state_dict(),f"{self.settings.ckpt_path}/generator_{self.settings.dataset}-{self.settings.network}-w{self.settings.qw}_a{self.settings.qa}.pt")

                self.logger.info("#==>Best Result of ep {:d} is: Top1 Error: {:f}, Top5 Error: {:f}, at ep {:d}".format(epoch+1, best_top1, best_top5, best_ep))
                self.logger.info("#==>Best Result of ep {:d} is: Top1 Accuracy: {:f}, Top5 Accuracy: {:f} at ep {:d}".format(epoch+1 , 100 - best_top1,
                                                                                                                             100 - best_top5, best_ep))

        # NOTE(review): BaseException also catches KeyboardInterrupt, so Ctrl-C
        # is swallowed here and only logged -- presumably intentional, to still
        # report the elapsed time and best result below.
        except BaseException as e:
            self.logger.error("Training is terminating due to exception: {}".format(str(e)))
            traceback.print_exc()

        end_time = time.time()
        time_interval = end_time - start_time
        t_string = "Running Time is: " + str(datetime.timedelta(seconds=time_interval)) + "\n"
        self.logger.info(t_string)

        return best_top1, best_top5
def main():
    """Entry point: parse CLI options, build the generator matching the
    configured dataset/backbone pair, and run (or evaluate) the experiment.
    """
    parser = argparse.ArgumentParser(description='Baseline')
    parser.add_argument('--conf_path', type=str, metavar='conf_path',
                        help='input the path of config file')
    parser.add_argument('--id', type=int, metavar='experiment_id',
                        help='Experiment ID')
    parser.add_argument('--freeze', action='store_true')
    parser.add_argument('--multi_label_prob', type=float, default=0.0)
    parser.add_argument('--multi_label_num', type=int, default=2)
    parser.add_argument('--gpu', type=str, default="0")
    parser.add_argument('--randemb', action='store_true')
    parser.add_argument('--no_DM', action='store_false')
    parser.add_argument('--qw', type=int, default=None)
    parser.add_argument('--qa', type=int, default=None)
    parser.add_argument('--noise_scale', type=float, default=1.0)
    parser.add_argument('--ckpt_path', type=str, default='./ckpt')
    parser.add_argument('--eval', action='store_true')
    cli = parser.parse_args()
    print(cli)

    os.makedirs(cli.ckpt_path, exist_ok=True)

    # Config file plus CLI overrides; seed/ID are derived from --id so that
    # repeated runs get distinct seeds and save directories.
    opts = Option(cli.conf_path, cli)
    opts.manualSeed = cli.id + 1
    opts.experimentID = opts.experimentID + "{:0>2d}_repeat".format(cli.id)

    if opts.dataset in ["cifar100", "cifar10"]:
        if opts.network in ["resnet20_cifar100", "resnet20_cifar10"]:
            # Teacher's classifier weights seed the label embedding, unless
            # --randemb asks for a randomly initialised one.
            teacher_fc = ptcv_get_model(opts.network, pretrained=True).output.weight.detach()
            if cli.randemb:
                teacher_fc = None
            gen = Generator(opts, teacher_weight=teacher_fc, freeze=cli.freeze)
        else:
            assert False, "unsupport network: " + opts.network
    elif opts.dataset in ["imagenet"]:
        if opts.network in ["resnet18", "resnet50", "mobilenetv2_w1"]:
            teacher_fc = ptcv_get_model(opts.network, pretrained=True).output.weight.detach()
            if opts.network in ["mobilenetv2_w1"]:
                # MobileNetV2's classifier is a 1x1 conv; drop the spatial dims.
                teacher_fc = teacher_fc.squeeze(-1).squeeze(-1)
            if cli.randemb:
                teacher_fc = None
            gen = Generator_imagenet(opts, teacher_weight=teacher_fc, freeze=cli.freeze)
        else:
            assert False, "unsupport network: " + opts.network
    else:
        assert False, "invalid data set"

    experiment = ExperimentDesign(gen, opts)
    if cli.eval:
        # Evaluation-only mode: load the saved best student and test it.
        weight_path = f"{cli.ckpt_path}/student_model_{opts.dataset}-{opts.network}-w{opts.qw}_a{opts.qa}.pt"
        experiment.trainer.model.load_state_dict(torch.load(weight_path))
        experiment.trainer.test_student()
    else:
        experiment.run()
# Script entry point: parse CLI arguments and launch the experiment.
if __name__ == '__main__':
    main()
| 16,326 | 32.943867 | 182 | py |
null | qimera-main/options.py | import os
import shutil
from pyhocon import ConfigFactory
from utils.opt_static import NetOption
class Option(NetOption):
    """Experiment configuration.

    Reads a pyhocon config file and merges in command-line overrides
    (quantization bit-widths, multi-label superposing settings, GPU choice,
    checkpoint path). Also manages the experiment's save directory.
    """
    def __init__(self, conf_path, args):
        super(Option, self).__init__()
        self.conf = ConfigFactory.parse_file(conf_path)
        # ------------ General options ----------------------------------------
        self.save_path = self.conf['save_path']
        self.dataPath = self.conf['dataPath']  # path for loading data set
        self.dataset = self.conf['dataset']  # options: imagenet | cifar100
        self.nGPU = self.conf['nGPU']  # number of GPUs to use by default
        self.GPU = self.conf['GPU']  # default gpu to use, options: range(nGPU)
        self.visible_devices = args.gpu  # CLI value overrides the config file
        self.network = self.conf['network']

        # ------------- Data options -------------------------------------------
        self.nThreads = self.conf['nThreads']  # number of data loader threads

        # ---------- Optimization options --------------------------------------
        self.nEpochs = self.conf['nEpochs']  # number of total epochs to train
        self.batchSize = self.conf['batchSize']  # mini-batch size
        self.momentum = self.conf['momentum']  # momentum
        self.weightDecay = float(self.conf['weightDecay'])  # weight decay
        self.opt_type = self.conf['opt_type']

        self.warmup_epochs = self.conf['warmup_epochs']  # number of epochs for warmup
        self.lr_S = self.conf['lr_S']  # initial learning rate
        self.lrPolicy_S = self.conf['lrPolicy_S']  # options: multi_step | linear | exp | const | step
        self.step_S = self.conf['step_S']  # step for linear or exp learning rate policy
        self.decayRate_S = self.conf['decayRate_S']  # lr decay rate

        # ---------- Quantization options ---------------------------------------------
        # CLI bit-widths take precedence; fall back to the config file.
        # (Fixed `== None` to `is None`.)
        self.qw = self.conf['qw'] if args.qw is None else args.qw
        self.qa = self.conf['qa'] if args.qa is None else args.qa

        # ---------- Model options ---------------------------------------------
        self.experimentID = self.conf['experimentID']+self.conf['network']+"_qw_"+str(self.qw)+"_qa_"+str(self.qa)+"_freeze_"+str(args.freeze)+"_prob_"+str(args.multi_label_prob)+"_multi_label_"+str(args.multi_label_num)+"_randemb_"+str(args.randemb)+"_"
        self.nClasses = self.conf['nClasses']  # number of classes in the dataset

        # ----------KD options ---------------------------------------------
        self.temperature = self.conf['temperature']
        self.alpha = self.conf['alpha']

        # ----------Generator options ---------------------------------------------
        self.latent_dim = self.conf['latent_dim']
        self.img_size = self.conf['img_size']
        self.channels = self.conf['channels']

        self.lr_G = self.conf['lr_G']
        self.lrPolicy_G = self.conf['lrPolicy_G']  # options: multi_step | linear | exp | const | step
        self.step_G = self.conf['step_G']  # step for linear or exp learning rate policy
        self.decayRate_G = self.conf['decayRate_G']  # lr decay rate
        self.b1 = self.conf['b1']
        self.b2 = self.conf['b2']

        # ----------More option ---------------------------------------------
        self.multi_label_prob = args.multi_label_prob
        self.multi_label_num = args.multi_label_num
        self.no_DM = args.no_DM
        self.noise_scale = args.noise_scale
        # Dimension of the disentanglement-mapping output.
        self.intermediate_dim = 100
        if self.network == "resnet20":
            self.intermediate_dim = 64
        self.ckpt_path = args.ckpt_path
        self.eval = args.eval

    def set_save_path(self):
        """Derive the log directory name from the experiment parameters and
        create it, asking the user what to do when it already exists."""
        # NOTE(review): self.lr is not assigned in this class -- presumably
        # inherited from NetOption; confirm it matches lr_S.
        self.save_path = self.save_path + "log_{}_{}_bs{:d}_lr{:.4f}_qw{:d}_qa{:d}_epoch{}/".format(
            self.dataset, self.experimentID, self.batchSize, self.lr, self.qw, self.qa,
            self.nEpochs)

        if os.path.exists(self.save_path) and not self.eval:
            print("{} file exist!".format(self.save_path))
            action = input("Select Action: d (delete) / q (quit):").lower().strip()
            if action == 'd':
                shutil.rmtree(self.save_path)
            else:
                raise OSError("Directory {} exits!".format(self.save_path))

        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

    def paramscheck(self, logger):
        """Log the PyTorch version and derive nClasses from the dataset name."""
        logger.info("|===>The used PyTorch version is {}".format(
            self.torch_version))

        if self.dataset in ["cifar10", "mnist"]:
            self.nClasses = 10
        elif self.dataset == "cifar100":
            self.nClasses = 100
        elif self.dataset in ("imagenet", "thi_imgnet"):
            # BUG FIX: the original condition was
            # `self.dataset == "imagenet" or "thi_imgnet"`, which is always
            # truthy, so every dataset not matched above (including
            # "imagenet100") fell into this branch and got 1000 classes.
            self.nClasses = 1000
        elif self.dataset == "imagenet100":
            self.nClasses = 100
| 4,420 | 38.123894 | 292 | py |
null | qimera-main/run_cifar100_4bit.sh | #!/bin/bash
# Qimera training: ResNet-20 on CIFAR-100 (config: cifar100_resnet20.hocon),
# 40% chance of superposed labels, mixing up to 10 classes per sample.
# Bit-widths come from the .hocon file; the script name suggests 4-bit.
python main.py --conf_path ./cifar100_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 10 --id 01
null | qimera-main/run_cifar10_4bit.sh | #!/bin/bash
# Qimera training: ResNet-20 on CIFAR-10 with a randomly initialised label
# embedding (--randemb); superpose 2 labels with probability 0.4.
# Bit-widths come from the .hocon file; the script name suggests 4-bit.
python main.py --conf_path ./cifar10_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 2 --id 01 --randemb
null | qimera-main/run_imgnet_mobilenet_v2_4bit.sh | #!/bin/bash
# Qimera training: MobileNetV2 on ImageNet with a random label embedding;
# superpose up to 100 labels with probability 0.4.
# Bit-widths come from the .hocon file; the script name suggests 4-bit.
python main.py --conf_path ./imagenet_mobilenet_v2.hocon --multi_label_prob 0.4 --multi_label_num 100 --id 01 --randemb
null | qimera-main/run_imgnet_resnet18_4bit.sh | #!/bin/bash
# Qimera training: ResNet-18 on ImageNet with a random label embedding;
# superpose up to 500 labels with probability 0.4.
# Bit-widths come from the .hocon file; the script name suggests 4-bit.
python main.py --conf_path ./imagenet_resnet18.hocon --multi_label_prob 0.4 --multi_label_num 500 --id 01 --randemb
null | qimera-main/run_imgnet_resnet50_4bit.sh | #!/bin/bash
# Qimera training: ResNet-50 on ImageNet (teacher-initialised embedding);
# superpose up to 500 labels with probability 0.7.
# Bit-widths come from the .hocon file; the script name suggests 4-bit.
python main.py --conf_path ./imagenet_resnet50.hocon --multi_label_prob 0.7 --multi_label_num 500 --id 01
null | qimera-main/trainer.py | """
basic trainer
"""
import time
import torch.autograd
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import utils as utils
import numpy as np
import torch
import math
__all__ = ["Trainer"]
class Trainer(object):
    """
    trainer for training network, use SGD

    Jointly optimizes the generator (which synthesizes class-conditional fake
    images) and the quantized student network, distilling from a frozen
    full-precision teacher. BatchNorm forward hooks on the teacher collect the
    statistics used for the BNS (batch-norm statistics) loss.
    """
    def __init__(self, model, model_teacher, generator, lr_master_S, lr_master_G,
                 train_loader, test_loader, settings, logger, tensorboard_logger=None,
                 opt_type="SGD", optimizer_state=None, run_count=0):
        """
        init trainer

        model: quantized student; model_teacher: frozen FP teacher;
        generator: synthetic-image generator; lr_master_S / lr_master_G:
        learning-rate schedules for student and generator; opt_type selects
        the student optimizer ("SGD" | "RMSProp" | "Adam").
        """
        self.settings = settings

        self.model = utils.data_parallel(
            model, self.settings.nGPU, self.settings.GPU)
        self.model_teacher = utils.data_parallel(
            model_teacher, self.settings.nGPU, self.settings.GPU)
        self.all_idx = torch.IntTensor([x for x in range(self.settings.nClasses)]).cuda()
        self.no_noise = torch.zeros(self.settings.nClasses,self.settings.latent_dim).cuda()
        self.generator = utils.data_parallel(
            generator, self.settings.nGPU, self.settings.GPU)

        self.train_loader = train_loader
        self.test_loader = test_loader
        self.tensorboard_logger = tensorboard_logger
        self.log_soft = nn.LogSoftmax(dim=1)
        self.MSE_loss = nn.MSELoss().cuda()
        self.lr_master_S = lr_master_S
        self.lr_master_G = lr_master_G
        self.opt_type = opt_type
        if opt_type == "SGD":
            self.optimizer_S = torch.optim.SGD(
                params=self.model.parameters(),
                lr=self.lr_master_S.lr,
                momentum=self.settings.momentum,
                weight_decay=self.settings.weightDecay,
                nesterov=True,
            )
        elif opt_type == "RMSProp":
            self.optimizer_S = torch.optim.RMSprop(
                params=self.model.parameters(),
                lr=self.lr_master_S.lr,
                eps=1.0,
                weight_decay=self.settings.weightDecay,
                momentum=self.settings.momentum,
                alpha=self.settings.momentum
            )
        elif opt_type == "Adam":
            self.optimizer_S = torch.optim.Adam(
                params=self.model.parameters(),
                lr=self.lr_master_S.lr,
                eps=1e-5,
                weight_decay=self.settings.weightDecay
            )
        else:
            # NOTE(review): opt_type is a string, so "%d" would raise a
            # TypeError while building this assert message.
            assert False, "invalid type: %d" % opt_type
        if optimizer_state is not None:
            self.optimizer_S.load_state_dict(optimizer_state)

        self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=self.settings.lr_G,
                                            betas=(self.settings.b1, self.settings.b2))

        self.logger = logger
        self.run_count = run_count
        self.scalar_info = {}
        # Per-batch BN statistics captured by hook_fn_forward during a
        # teacher forward pass, plus the teacher's stored running stats.
        self.mean_list = []
        self.var_list = []
        self.teacher_running_mean = []
        self.teacher_running_var = []
        self.save_BN_mean = []
        self.save_BN_var = []

        self.fix_G = False

    def update_lr(self, epoch):
        """
        update learning rate of optimizers
        :param epoch: current training epoch
        """
        lr_S = self.lr_master_S.get_lr(epoch)
        lr_G = self.lr_master_G.get_lr(epoch)
        # update learning rate of model optimizer
        for param_group in self.optimizer_S.param_groups:
            param_group['lr'] = lr_S

        for param_group in self.optimizer_G.param_groups:
            param_group['lr'] = lr_G

    def loss_fn_kd(self, output, labels, teacher_outputs, linear=None):
        """
        Compute the knowledge-distillation (KD) loss given outputs, labels.
        "Hyperparameters": temperature and alpha

        NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
        and student expects the input tensor to be log probabilities! See Issue #2

        `linear` carries the (possibly soft, multi-label) target distribution
        used for the cross-entropy term; `labels` itself is unused here.
        NOTE(review): nn.KLDivLoss() uses the default 'mean' reduction, which
        averages over elements rather than per-sample ('batchmean') -- this
        scales the KD term down by nClasses; presumably compensated by alpha.
        """
        criterion_d = nn.CrossEntropyLoss(reduction='none').cuda()
        kdloss = nn.KLDivLoss().cuda()

        alpha = self.settings.alpha
        T = self.settings.temperature
        a = F.log_softmax(output / T, dim=1)
        b = F.softmax(teacher_outputs / T, dim=1)
        c = (alpha * T * T)
        # Soft-label cross entropy against the `linear` target distribution.
        d = (-(linear*self.log_soft(output)).sum(dim=1)).mean()

        KD_loss = kdloss(a,b)*c + d
        return KD_loss

    def forward(self, images, teacher_outputs, labels=None, linear=None):
        """
        forward propagation

        Runs the student on `images`; when labels are given, also returns the
        KD loss against the teacher outputs, otherwise (output, None).
        """
        # forward and backward and optimize
        output = self.model(images)
        if labels is not None:
            loss = self.loss_fn_kd(output, labels, teacher_outputs, linear)
            return output, loss
        else:
            return output, None

    def backward_G(self, loss_G):
        """
        backward propagation

        Updates only the generator's parameters.
        """
        self.optimizer_G.zero_grad()
        loss_G.backward()
        self.optimizer_G.step()

    def backward_S(self, loss_S):
        """
        backward propagation

        Updates only the student's parameters.
        """
        self.optimizer_S.zero_grad()
        loss_S.backward()
        self.optimizer_S.step()

    def backward(self, loss):
        """
        backward propagation

        Single backward pass driving both optimizers at once.
        """
        self.optimizer_G.zero_grad()
        self.optimizer_S.zero_grad()
        loss.backward()
        self.optimizer_G.step()
        self.optimizer_S.step()

    def hook_fn_forward(self,module, input, output):
        """Forward hook installed on the teacher's BatchNorm2d layers.

        Records the batch mean/variance of the layer's input alongside the
        layer's stored running statistics; both feed the BNS loss in train().
        """
        input = input[0]
        mean = input.mean([0, 2, 3])
        # use biased var in train
        var = input.var([0, 2, 3], unbiased=False)

        self.mean_list.append(mean)
        self.var_list.append(var)
        self.teacher_running_mean.append(module.running_mean)
        self.teacher_running_var.append(module.running_var)

    def train(self, epoch):
        """
        training

        One epoch = 200 generator batches. Each iteration: sample noise and
        labels (single-label, or superposed multi-label with probability
        multi_label_prob), generate fake images, update the generator with
        one-hot + BNS losses, then (after warmup) update the student with the
        KD loss against the teacher. Returns (top1_error, loss, top5_error).
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()
        fp_acc = utils.AverageMeter()

        iters = 200
        self.update_lr(epoch)

        # Student/teacher stay in eval mode; only the generator trains its BNs.
        self.model.eval()
        self.model_teacher.eval()
        self.generator.train()

        start_time = time.time()
        end_time = start_time

        # Install BN hooks on the teacher once, at the first epoch.
        if epoch==0:
            for m in self.model_teacher.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.register_forward_hook(self.hook_fn_forward)

        for i in range(iters):
            start_time = time.time()
            data_time = start_time - end_time

            # Coin flip: superpose multiple labels per sample or not.
            multi_class = torch.rand(1)
            self.MERGE_PARAM = self.settings.multi_label_num
            MERGE_PROB = self.settings.multi_label_prob # superpose probability
            if multi_class<MERGE_PROB:
                # Get labels ranging from 0 to n_classes for n rows
                z = Variable(torch.randn(self.settings.batchSize, self.MERGE_PARAM,self.settings.latent_dim)).cuda()
                labels = Variable(torch.randint(0, self.settings.nClasses, (self.settings.batchSize,self.MERGE_PARAM))).cuda()
                # Random soft mixing weights over the MERGE_PARAM labels.
                linear = F.softmax(torch.randn(self.settings.batchSize,self.MERGE_PARAM),dim=1).cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                # Soft target distribution: mixing weights scattered per class.
                labels_loss = Variable(torch.zeros(self.settings.batchSize,self.settings.nClasses)).cuda()
                labels_loss.scatter_add_(1,labels,linear)
                images = self.generator(z, labels, linear)
            else:
                z = Variable(torch.randn(self.settings.batchSize, self.settings.latent_dim)).cuda()
                labels = Variable(torch.randint(0, self.settings.nClasses, (self.settings.batchSize,))).cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                images = self.generator(z, labels)
                # One-hot target distribution.
                labels_loss = Variable(torch.zeros(self.settings.batchSize,self.settings.nClasses)).cuda()
                labels_loss.scatter_(1,labels.unsqueeze(1),1.0)

            # Teacher forward fills mean_list/var_list via the BN hooks.
            self.mean_list.clear()
            self.var_list.clear()
            output_teacher_batch = self.model_teacher(images)

            # Cross entropy of the teacher's prediction vs the (soft) targets.
            loss_one_hot = (-(labels_loss*self.log_soft(output_teacher_batch)).sum(dim=1)).mean()
            # BN statistic loss
            BNS_loss = torch.zeros(1).cuda()

            for num in range(len(self.mean_list)):
                BNS_loss += self.MSE_loss(self.mean_list[num], self.teacher_running_mean[num]) + self.MSE_loss(
                    self.var_list[num], self.teacher_running_var[num])

            BNS_loss = BNS_loss / len(self.mean_list)

            # loss of Generator
            loss_G = loss_one_hot + 0.1 * BNS_loss

            self.backward_G(loss_G)

            # Student step uses detached images/teacher outputs so gradients
            # don't flow back into the generator.
            output, loss_S = self.forward(images.detach(), output_teacher_batch.detach(), labels,linear=labels_loss)
            if epoch>= self.settings.warmup_epochs:
                self.backward_S(loss_S)

            # For the multi-label case, accuracy is measured against the first
            # (dominant-slot) label of each sample.
            if multi_class<MERGE_PROB:
                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output, labels=labels[:,0],
                    loss=loss_S, top5_flag=True, mean_flag=True)
            else:
                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output, labels=labels,
                    loss=loss_S, top5_flag=True, mean_flag=True)

            top1_error.update(single_error, images.size(0))
            top1_loss.update(single_loss, images.size(0))
            top5_error.update(single5_error, images.size(0))

            end_time = time.time()

            gt = labels.data.cpu().numpy()
            # Teacher's accuracy on the fakes ("fidelity" of generation).
            d_acc = np.mean(np.argmax(output_teacher_batch.data.cpu().numpy(), axis=1) == gt)

            fp_acc.update(d_acc)

        # Per-epoch summary of the last iteration's losses.
        print(
            "[Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%] [G loss: %f] [One-hot loss: %f] [BNS_loss:%f] [S loss: %f]"
            % (epoch + 1, self.settings.nEpochs, i+1, iters, 100 * fp_acc.avg, loss_G.item(), loss_one_hot.item(), BNS_loss.item(),
               loss_S.item())
        )

        self.scalar_info['accuracy every epoch'] = 100 * d_acc
        self.scalar_info['G loss every epoch'] = loss_G
        self.scalar_info['One-hot loss every epoch'] = loss_one_hot
        self.scalar_info['S loss every epoch'] = loss_S

        self.scalar_info['training_top1error'] = top1_error.avg
        self.scalar_info['training_top5error'] = top5_error.avg
        self.scalar_info['training_loss'] = top1_loss.avg

        if self.tensorboard_logger is not None:
            for tag, value in list(self.scalar_info.items()):
                self.tensorboard_logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}

        return top1_error.avg, top1_loss.avg, top5_error.avg

    def test_student(self):
        """
        testing

        Evaluate the (loaded) quantized student on the real test set and print
        its top-1 accuracy.
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()

        self.model.eval()
        self.model_teacher.eval()

        iters = len(self.test_loader)
        start_time = time.time()
        end_time = start_time

        with torch.no_grad():
            for i, (images, labels) in enumerate(self.test_loader):
                start_time = time.time()

                labels = labels.cuda()
                images = images.cuda()
                output = self.model(images)

                # Dummy loss: only the error metrics matter here.
                loss = torch.ones(1)
                self.mean_list.clear()
                self.var_list.clear()

                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output, loss=loss,
                    labels=labels, top5_flag=True, mean_flag=True)

                top1_error.update(single_error, images.size(0))
                top1_loss.update(single_loss, images.size(0))
                top5_error.update(single5_error, images.size(0))

                end_time = time.time()
        print()
        print(
            "Student Model Accuracy : %.4f%%"
            % (100.00-top1_error.avg))

    def test(self, epoch):
        """
        testing

        Evaluate the quantized student on the real test set for one epoch and
        return (top1_error, loss, top5_error); also mirrors the metrics to
        tensorboard when a logger was provided.
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()

        self.model.eval()
        self.model_teacher.eval()

        iters = len(self.test_loader)
        start_time = time.time()
        end_time = start_time

        with torch.no_grad():
            for i, (images, labels) in enumerate(self.test_loader):
                start_time = time.time()

                labels = labels.cuda()
                images = images.cuda()
                output = self.model(images)

                # Dummy loss: only the error metrics matter here.
                loss = torch.ones(1)
                self.mean_list.clear()
                self.var_list.clear()

                single_error, single_loss, single5_error = utils.compute_singlecrop(
                    outputs=output, loss=loss,
                    labels=labels, top5_flag=True, mean_flag=True)

                top1_error.update(single_error, images.size(0))
                top1_loss.update(single_loss, images.size(0))
                top5_error.update(single5_error, images.size(0))

                end_time = time.time()

        print(
            "[Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%]"
            % (epoch + 1, self.settings.nEpochs, i + 1, iters, (100.00-top1_error.avg))
        )

        self.scalar_info['testing_top1error'] = top1_error.avg
        self.scalar_info['testing_top5error'] = top5_error.avg
        self.scalar_info['testing_loss'] = top1_loss.avg
        if self.tensorboard_logger is not None:
            for tag, value in self.scalar_info.items():
                self.tensorboard_logger.scalar_summary(tag, value, self.run_count)
            self.scalar_info = {}
        self.run_count += 1

        return top1_error.avg, top1_loss.avg, top5_error.avg

    def test_teacher(self, epoch):
        """
        testing

        Evaluate the full-precision teacher on the real test set (sanity
        check) and return (top1_error, loss, top5_error).
        """
        top1_error = utils.AverageMeter()
        top1_loss = utils.AverageMeter()
        top5_error = utils.AverageMeter()

        self.model_teacher.eval()

        iters = len(self.test_loader)
        start_time = time.time()
        end_time = start_time

        with torch.no_grad():
            for i, (images, labels) in enumerate(self.test_loader):
                start_time = time.time()
                data_time = start_time - end_time

                labels = labels.cuda()
                if self.settings.tenCrop:
                    # Ten-crop evaluation: split the stacked crops, average
                    # predictions over them.
                    # NOTE(review): `image_size[1] / 10` is float division and
                    # `self.forward(img_var)` is missing its teacher_outputs
                    # argument -- this branch looks broken/unused; verify
                    # before enabling tenCrop.
                    image_size = images.size()
                    images = images.view(
                        image_size[0] * 10, image_size[1] / 10, image_size[2], image_size[3])
                    images_tuple = images.split(image_size[0])
                    output = None
                    for img in images_tuple:
                        if self.settings.nGPU == 1:
                            img = img.cuda()
                        img_var = Variable(img, volatile=True)
                        temp_output, _ = self.forward(img_var)
                        if output is None:
                            output = temp_output.data
                        else:
                            output = torch.cat((output, temp_output.data))
                    single_error, single_loss, single5_error = utils.compute_tencrop(
                        outputs=output, labels=labels)
                else:
                    if self.settings.nGPU == 1:
                        images = images.cuda()

                    output = self.model_teacher(images)

                    # Dummy loss: only the error metrics matter here.
                    loss = torch.ones(1)
                    self.mean_list.clear()
                    self.var_list.clear()

                    single_error, single_loss, single5_error = utils.compute_singlecrop(
                        outputs=output, loss=loss,
                        labels=labels, top5_flag=True, mean_flag=True)
                #
                top1_error.update(single_error, images.size(0))
                top1_loss.update(single_loss, images.size(0))
                top5_error.update(single5_error, images.size(0))

                end_time = time.time()
                iter_time = end_time - start_time

        print(
            "Teacher network: [Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%]"
            % (epoch + 1, self.settings.nEpochs, i + 1, iters, (100.00 - top1_error.avg))
        )

        self.run_count += 1

        return top1_error.avg, top1_loss.avg, top5_error.avg
| 13,496 | 27.901499 | 123 | py |
null | qimera-main/other_train_scripts/eval_cifar100_5bit.sh | #!/bin/bash
python main.py --conf_path ./cifar100_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 50 --id 01 --qw 5 --qa 5 --eval | 139 | 45.666667 | 126 | sh |
null | qimera-main/other_train_scripts/eval_cifar10_5bit.sh | #!/bin/bash
python main.py --conf_path ./cifar10_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 2 --id 01 --randemb --qw 5 --qa 5 --eval | 146 | 48 | 133 | sh |
null | qimera-main/other_train_scripts/eval_imgnet_mobilenet_v2_5bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_mobilenet_v2.hocon --multi_label_prob 0.4 --multi_label_num 100 --id 01 --randemb --qw 5 --qa 5 --eval | 154 | 50.666667 | 141 | sh |
null | qimera-main/other_train_scripts/eval_imgnet_resnet18_5bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_resnet18.hocon --multi_label_prob 0.4 --multi_label_num 100 --id 01 --randemb --qw 5 --qa 5 --eval | 150 | 49.333333 | 137 | sh |
null | qimera-main/other_train_scripts/eval_imgnet_resnet50_5bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_resnet50.hocon --multi_label_prob 0.7 --multi_label_num 100 --id 01 --qw 5 --qa 5 --eval | 140 | 46 | 127 | sh |
null | qimera-main/other_train_scripts/run_cifar100_5bit.sh | #!/bin/bash
python main.py --conf_path ./cifar100_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 50 --id 01 --qw 5 --qa 5 | 132 | 43.333333 | 119 | sh |
null | qimera-main/other_train_scripts/run_cifar10_5bit.sh | #!/bin/bash
python main.py --conf_path ./cifar10_resnet20.hocon --multi_label_prob 0.4 --multi_label_num 2 --id 01 --randemb --qw 5 --qa 5 | 139 | 45.666667 | 126 | sh |
null | qimera-main/other_train_scripts/run_imgnet_mobilenet_v2_5bit.sh | #!/bin/bash
python main.py --conf_path ./imagenet_mobilenet_v2.hocon --multi_label_prob 0.4 --multi_label_num 100 --id 01 --randemb --qw 5 --qa 5 | 147 | 48.333333 | 134 | sh |