From ab81b321078c45c0ee2841f412375963d3e388d9 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 1 Dec 2021 12:44:27 -0800 Subject: [PATCH 01/40] fixing CI actions (#1) * running python files through linter * using the correct submodule name * removed slash * adding correct tag * fixing build for bionic * linking readme and adding more discription --- .github/workflows/ci_pr.yml | 66 +- .github/workflows/ci_push.yml | 12 +- .github/workflows/ci_release.yml | 10 +- README.md | 41 +- .../inspection/scripts/pano_orientations.py | 1 - .../acoustics_cam/nodes/acoustics_cam | 28 +- .../acoustics_cam/src/generate_pure_tones.py | 30 +- .../tools/cameras_to_texrecon.py | 114 ++- .../geometry_mapper/tools/geometry_mapper.py | 900 ++++++++++++------ img_analysis/resources/analyse_img.py | 69 -- img_analysis/resources/vent_cnn.py | 156 --- img_analysis/scripts/analyse_img.py | 52 +- img_analysis/scripts/train_cnn_vent.py | 97 +- scripts/docker/build.sh | 2 + scripts/git/cpplint_repo.py | 4 + scripts/setup/build_install_dependencies.sh | 6 +- 16 files changed, 852 insertions(+), 736 deletions(-) delete mode 100644 img_analysis/resources/analyse_img.py delete mode 100644 img_analysis/resources/vent_cnn.py diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index dbb37e55..5463516b 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -14,21 +14,21 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu16.04 + run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' -t isaac:latest-ubuntu16.04 build-bionic: @@ -39,21 +39,21 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu18.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 -t isaac:latest-ubuntu18.04 build-focal: @@ -64,19 +64,19 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu20.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 -t isaac:latest-ubuntu20.04 diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 85f1e14e..9ff51af1 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -22,7 +22,7 @@ jobs: --build-arg ROS_VERSION=kinetic \ --build-arg PYTHON='' \ --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu16.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 run: docker build . -f ./scripts/docker/isaac.Dockerfile \ @@ -48,7 +48,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -56,7 +56,7 @@ jobs: --build-arg ROS_VERSION=melodic \ --build-arg PYTHON=3 \ --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu18.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 run: docker build . -f ./scripts/docker/isaac.Dockerfile \ @@ -82,7 +82,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -90,7 +90,7 @@ jobs: --build-arg ROS_VERSION=noetic \ --build-arg PYTHON=3 \ --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu20.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 run: docker build . 
-f ./scripts/docker/isaac.Dockerfile \ diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 462366c8..547c8e24 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -59,7 +59,7 @@ jobs: --build-arg ROS_VERSION=melodic \ --build-arg PYTHON=3 \ --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu18.04 + -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 run: docker build . -f ./scripts/docker/isaac.Dockerfile \ @@ -88,7 +88,7 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ @@ -96,7 +96,7 @@ jobs: --build-arg ROS_VERSION=noetic \ --build-arg PYTHON=3 \ --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu20.04 + -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 run: docker build . 
-f ./scripts/docker/isaac.Dockerfile \ diff --git a/README.md b/README.md index bcb34ab2..efcde8f2 100644 --- a/README.md +++ b/README.md @@ -2,14 +2,26 @@ ### About +The Integrated System for Autonomous and Adaptive Caretaking project is developing technology for combining robots inside a spacecraft with vehicle infrastructure subsystems, to form an integrated autonomous system. +The system validates its technology through demonstrations using the Astrobee free-flier robots, existing robots onboard the International Space Station as analogs for future robots that will be developed for the Gateway. + The ISAAC project actively develops in a variety of repos hosted at -Ames and JSC, as described on the (collaboration tools wiki -page)[https://babelfish.arc.nasa.gov/confluence/display/astrosoft/Astrosoft+Collaboration+Tools] +Ames and JSC. This `isaac` repo serves as a master for integrating an end-to-end -demo that draws on code from the other repos (as well as directly +demo that draws on code from the other repos as well as directly including a significant amount of the ISAAC code, mostly relating to -the Astrobee robot). +the Astrobee robot. + + +### System requirements + +The `isaac` repo depends on the `astrobee` repo, therefore it inherits +the same system requirements. You must use Ubuntu 16.04 to 20.04 64-bit. When +running in simulation, certain Gazebo plugins require appropriate +graphics drivers. + +### Usage There are two main ways to install and run `isaac`: @@ -25,22 +37,13 @@ There are two main ways to install and run `isaac`: in your native OS, and you don't need to install for development prior to installing for demo. -### System requirements - -The `isaac` repo depends on the `astrobee` repo, therefore it inherits -the same system requirements. You must use Ubuntu 16.04 to 20.04 64-bit. When -running in simulation, certain Gazebo plugins require appropriate -graphics drivers. See INSTALL.md in that repository for more -information. 
- -### Usage - -[Instructions on installing and using the ISAAC Software](INSTALL.md). [For running -the demos](DEMO_INSTALL.md) +[Instructions on installing and using the ISAAC Software](https://nasa.github.io/isaac/html/md_INSTALL.html). For running the [docker demos](https://nasa.github.io/isaac/html/md_DEMO_INSTALL.html) ### Documentation -There are Doxygen comments in the header files. To compile (make sure you have the latest doxygen installed): +[The documentation is auto-generated from the contents of this repository.](https://nasa.github.io/isaac/documentation.html) + +To compile the documentation locally (make sure you have the latest doxygen installed): doxygen isaac.doxyfile @@ -48,8 +51,8 @@ There are Doxygen comments in the header files. To compile (make sure you have t The ISAAC Software is open source, and we welcome contributions from the public. Please submit pull requests to the `develop` branch. For us to merge any pull -requests, we must request that contributors sign and submit a Contributor License -Agreement due to NASA legal requirements. Thank you for your understanding. +requests, we must request that contributors sign and submit either an [Individual Contributor License Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Individual%20CLA.pdf) or a [Corporate Contributor License +Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Corporate%20CLA.pdf) due to NASA legal requirements. Thank you for your understanding. 
### License diff --git a/astrobee/behaviors/inspection/scripts/pano_orientations.py b/astrobee/behaviors/inspection/scripts/pano_orientations.py index ea206094..801b259b 100755 --- a/astrobee/behaviors/inspection/scripts/pano_orientations.py +++ b/astrobee/behaviors/inspection/scripts/pano_orientations.py @@ -16,7 +16,6 @@ import numpy as np from scipy.spatial.transform import Rotation - EPS = 1e-5 ROLL, PITCH, YAW = 0, 1, 2 diff --git a/astrobee/simulation/acoustics_cam/nodes/acoustics_cam b/astrobee/simulation/acoustics_cam/nodes/acoustics_cam index dfec1d61..cd0302a7 100755 --- a/astrobee/simulation/acoustics_cam/nodes/acoustics_cam +++ b/astrobee/simulation/acoustics_cam/nodes/acoustics_cam @@ -28,24 +28,32 @@ the result as a camera image, called acoustics_cam. # python 2/3 compatibility from __future__ import print_function -import copy, os, json, time, sys, cv2, math, re, threading -import numpy as np -import scipy.interpolate -from scipy.io import wavfile -from scipy.interpolate import RegularGridInterpolator +import copy +import json +import math +import os +import re +import sys +import threading +import time + +import cv2 +import geometry_msgs.msg import matplotlib.pyplot as plt +import numpy as np import pyroomacoustics as pra +import roslib # ROS import rospy -import roslib -import tf -import geometry_msgs.msg +import scipy.interpolate import std_msgs.msg -from sensor_msgs.msg import CameraInfo -from sensor_msgs.msg import Image +import tf from cv_bridge import CvBridge from ff_msgs.msg import CommandStamped +from scipy.interpolate import RegularGridInterpolator +from scipy.io import wavfile +from sensor_msgs.msg import CameraInfo, Image # Making this larger computes the spatial response at a denser set of directions. 
NUM_ANGLES = 4 * 45 * 22 # 180 * 90 diff --git a/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py b/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py index 007054dc..3351cb70 100644 --- a/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py +++ b/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py @@ -19,17 +19,18 @@ # under the License. import numpy as np +from matplotlib import pyplot as plt +from scipy.interpolate import RegularGridInterpolator from scipy.io import wavfile from scipy.signal import welch -from scipy.interpolate import RegularGridInterpolator -from matplotlib import pyplot as plt fs = 32000 # sample rate, Hz -T = 30. # sample duration, seconds +T = 30.0 # sample duration, seconds freqs = [12333, 10533] # Hz rng = np.random.RandomState(23) + def get_signal(F, fs, T): t = np.arange(fs * T) / fs weight_sum = 0 @@ -43,32 +44,33 @@ def get_signal(F, fs, T): weight_sum += weight signal += weight * np.clip(1.0 / 3 * rng.normal(size=t.shape), -1, 1) - return (32768 / weight_sum * signal).astype('int16') + return (32768 / weight_sum * signal).astype("int16") + for F in freqs: signal = get_signal(F, fs, T) - fname = 'test_sounds/tone%s.wav' % F + fname = "test_sounds/tone%s.wav" % F wavfile.write(fname, fs, signal) - print('wrote %s' % fname) + print("wrote %s" % fname) for F in freqs: - fname = 'test_sounds/tone%s.wav' % F + fname = "test_sounds/tone%s.wav" % F fs, signal = wavfile.read(fname) - welch_F, Pxx = welch(signal, fs, nperseg=8192, average='median') + welch_F, Pxx = welch(signal, fs, nperseg=8192, average="median") plt.semilogy(welch_F, Pxx) interp = RegularGridInterpolator([welch_F], Pxx) - print('PSD for %s:' % fname) + print("PSD for %s:" % fname) for F in freqs: - print(' @ %s Hz: %s' % (F, interp([F]))) + print(" @ %s Hz: %s" % (F, interp([F]))) plt.legend([str(F) for F in freqs]) -plt.xlabel('Frequency (Hz)') -plt.ylabel('PSD ($V^2$ / Hz)') +plt.xlabel("Frequency (Hz)") +plt.ylabel("PSD ($V^2$ / Hz)") 
-fname = 'tones.png' +fname = "tones.png" plt.savefig(fname) -print('wrote %s' % fname) +print("wrote %s" % fname) plt.close() diff --git a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py index 518fd2c2..54006b5d 100644 --- a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py +++ b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py @@ -18,79 +18,101 @@ # Create camera files that texrecon will understand. -import sys, os, re, glob, argparse +import argparse +import glob +import os +import re +import sys + import numpy as np -parser = argparse.ArgumentParser(description='Convert cameras to the format of texrecon.') -parser.add_argument('--camera_dir', default = '', help='The directory containing the camera information (the output of geometry_mapper).') -parser.add_argument('--undistorted_image_dir', default = '', help='The directory containing the undistorted images.') -parser.add_argument('--camera_type', default = '', help='The camera type (nav_cam, haz_cam, or sci_cam).') +parser = argparse.ArgumentParser( + description="Convert cameras to the format of texrecon." +) +parser.add_argument( + "--camera_dir", + default="", + help="The directory containing the camera information (the output of geometry_mapper).", +) +parser.add_argument( + "--undistorted_image_dir", + default="", + help="The directory containing the undistorted images.", +) +parser.add_argument( + "--camera_type", default="", help="The camera type (nav_cam, haz_cam, or sci_cam)." +) args = parser.parse_args() if args.camera_dir == "" or args.undistorted_image_dir == "" or args.camera_type == "": - print("Must specify the camera directory, directory of undistorted images, and camera type.") + print( + "Must specify the camera directory, directory of undistorted images, and camera type." 
+ ) sys.exit(1) -if args.camera_type != "nav_cam" and args.camera_type != "haz_cam" \ - and args.camera_type != "sci_cam": +if ( + args.camera_type != "nav_cam" + and args.camera_type != "haz_cam" + and args.camera_type != "sci_cam" +): print("The camera type must be nav_cam, haz_cam, or sci_cam") sys.exit(1) - + # Read the intrinsics intr_file = args.undistorted_image_dir + "/undistorted_intrinsics.txt" if not os.path.exists(intr_file): print("Missing file: " + intr_file) sys.exit(1) -with open(intr_file, 'r') as f: +with open(intr_file, "r") as f: for line in f: - if re.match('^\s*\#', line): - continue # ignore the comments + if re.match("^\s*\#", line): + continue # ignore the comments vals = line.split() if len(vals) < 5: print("Expecting 5 parameters in " + intr_file) sys.exit(1) widx = float(vals[0]) widy = float(vals[1]) - f = float(vals[2]) - cx = float(vals[3]) - cy = float(vals[4]) + f = float(vals[2]) + cx = float(vals[3]) + cy = float(vals[4]) max_wid = widx - if (widy > max_wid): + if widy > max_wid: max_wid = widy - + # normalize - nf = f / max_wid + nf = f / max_wid ncx = cx / widx ncy = cy / widy - d0 = 0.0 - d1 = 0.0 + d0 = 0.0 + d1 = 0.0 paspect = 1.0 - break # finished reading the line we care for - + break # finished reading the line we care for + # Convert the cameras to texrecon's format suffix = "_" + args.camera_type + "_to_world.txt" # Get the cameras to write based on the list of images in the index # We avoid simply doing an ls in that directory to ensure we don't # run into old files -index_file = os.path.join(args.camera_dir, args.camera_type + '_index.txt') +index_file = os.path.join(args.camera_dir, args.camera_type + "_index.txt") with open(index_file, "r") as f: for image_file in f: image_file = image_file.rstrip() image_file = os.path.basename(image_file) - - m = re.match('^(.*?)\.jpg', image_file) + + m = re.match("^(.*?)\.jpg", image_file) if not m: print("Expecting a .jpg file, but got: " + image_file) in_cam = 
os.path.join(args.camera_dir, m.group(1) + suffix) - + out_cam = args.undistorted_image_dir + "/" + os.path.basename(in_cam) - m = re.match('^(.*?)' + suffix, out_cam) + m = re.match("^(.*?)" + suffix, out_cam) if not m: print("Could not match desired expression.") sys.exit(1) @@ -99,26 +121,34 @@ if not os.path.exists(in_cam): print("Cannot find: " + in_cam) sys.exit(1) - - M = np.loadtxt(in_cam) # camera to world - M = np.linalg.inv(M) # world to camera + + M = np.loadtxt(in_cam) # camera to world + M = np.linalg.inv(M) # world to camera print("Writing: " + out_cam) - with open(out_cam, 'w') as f: + with open(out_cam, "w") as f: # translation - f.write("%0.17g %0.17g %0.17g " % \ - (M[0][3], M[1][3], M[2][3])) + f.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) # rotation - f.write("%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % \ - (M[0][0], M[0][1], M[0][2], - M[1][0], M[1][1], M[1][2], - M[2][0], M[2][1], M[2][2])) + f.write( + "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" + % ( + M[0][0], + M[0][1], + M[0][2], + M[1][0], + M[1][1], + M[1][2], + M[2][0], + M[2][1], + M[2][2], + ) + ) # normaized inrinsics - f.write("%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % \ - (nf, d0, d1, paspect, ncx, ncy)) - - - + f.write( + "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" + % (nf, d0, d1, paspect, ncx, ncy) + ) diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 93e770a6..bd384ea2 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -18,410 +18,670 @@ # License for the specific language governing permissions and limitations # under the License. -''' +""" A wrapper around the tools that when run together produce a textured mesh. 
-''' +""" + +import argparse +import os +import re +import shutil +import subprocess +import sys + +import cv2 -import sys, os, argparse, subprocess, re, shutil, cv2 def process_args(args): - ''' + """ Set up the parser and parse the args. - ''' - + """ + # Extract some paths before the args are parsed src_path = os.path.dirname(args[0]) - exec_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(src_path)))) - exec_path = os.path.join(exec_path, 'devel/lib/geometry_mapper') - - astrobee_build_dir = os.environ['ASTROBEE_BUILD_PATH'] - - parser = argparse.ArgumentParser(description='Parameters for the geometry mapper.') - parser.add_argument('--ros_bag', dest = 'ros_bag', default = '', - help = 'A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.') - parser.add_argument('--sparse_map', dest = 'sparse_map', default = '', - help = 'A registered sparse map made with some of the ROS bag data.') - parser.add_argument('--output_dir', dest = 'output_dir', default = '', - help = 'The directory where to write the processed data.') - parser.add_argument('--nav_cam_topic', dest = 'nav_cam_topic', default = '/hw/cam_nav', - help = 'The nav cam topic in the bag file.') - parser.add_argument('--haz_cam_points_topic', dest = 'haz_cam_points_topic', - default = '/hw/depth_haz/points', - help = 'The haz cam point cloud topic in the bag file.') - parser.add_argument('--haz_cam_intensity_topic', dest = 'haz_cam_intensity_topic', - default = '/hw/depth_haz/extended/amplitude_int', - help = 'The haz cam intensity topic in the bag file.') - parser.add_argument('--sci_cam_topic', dest = 'sci_cam_topic', default = '/hw/cam_sci', - help = 'The sci cam topic in the bag file.') - parser.add_argument('--camera_type', dest = 'camera_type', default = 'all', - help = 'Specify which cameras to process. By default, process all. 
' - + 'Options: nav_cam, sci_cam.') - parser.add_argument('--start', dest = 'start', default = '0.0', - help = 'How many seconds into the bag to start processing the data.') - parser.add_argument('--duration', dest = 'duration', default = '-1.0', - help = 'For how many seconds to do the processing.') - parser.add_argument('--sampling_spacing_seconds', dest = 'sampling_spacing_seconds', - default = '2', - help = 'Spacing to use, in seconds, between consecutive depth images in ' - 'the bag that are processed.') - parser.add_argument('--dist_between_processed_cams', dest = 'dist_between_processed_cams', - default = '0.1', - help = 'Once an image or depth image is processed, how far the camera ' - + 'should move (in meters) before it should process more data.') - parser.add_argument('--sci_cam_timestamps', dest = 'sci_cam_timestamps', - default = '', - help = 'Process only these sci cam timestamps (rather than ' + - 'any in the bag using --dist_between_processed_cams, etc.). ' + - 'Must be a file with one timestamp per line.') - parser.add_argument('--depth_exclude_columns', dest = 'depth_exclude_columns', default = '0', - help = 'Remove this many columns of data from the rectangular ' + - 'depth image sensor at margins to avoid distortion.') - parser.add_argument('--depth_exclude_rows', dest = 'depth_exclude_rows', default = '0', - help = 'Remove this many rows of data from the rectangular ' + - 'depth image sensor at margins to avoid distortion.') - parser.add_argument('--foreshortening_delta', dest = 'foreshortening_delta', default = '5.0', - help = 'A smaller value here will result in holes in depth images ' + - 'being filled more aggressively but potentially with more artifacts ' + - 'in foreshortened regions.') - parser.add_argument('--median_filters', dest = 'median_filters', default = '7 0.1 25 0.1', - help = 'Given a list "w1 d1 w2 d2 ... 
", remove a depth image point ' + - 'if it differs, in the Manhattan norm, from the median of depth points ' + - 'in the pixel window of size wi centered at it by more than di. This ' + - 'removes points sticking out for each such i.') - parser.add_argument('--depth_hole_fill_diameter', dest = 'depth_hole_fill_diameter', default = '30', - help = 'Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).') - parser.add_argument('--max_ray_length', dest = 'max_ray_length', default = '2.0', - help = 'Process haz cam depth image points no further than ' - + 'this distance from the camera.') - parser.add_argument('--voxel_size', dest = 'voxel_size', default = '0.01', - help = 'When fusing the depth point clouds use a voxel of this size, ' - + 'in meters.') - parser.add_argument('--max_iso_times_exposure', dest = 'max_iso_times_exposure', - default = '5.1', - help = 'Apply the inverse gamma transform to images, multiply them by ' - + 'max_iso_times_exposure/ISO/exposure_time to adjust for ' - + 'lightning differences, then apply the gamma transform back. ' - + 'This value should be set to the maximum observed ' - + 'ISO * exposure_time. The default is 5.1. 
Not used with simulated data.') - parser.add_argument('--smoothing_time', dest = 'smoothing_time', - default = '0.00005', - help = 'A larger value will result in a smoother mesh.') - parser.add_argument('--max_num_hole_edges', dest = 'max_num_hole_edges', - default = '1000', - help = 'Close holes in the mesh which have no more than this many edges.') - parser.add_argument('--max_hole_diameter', dest = 'max_hole_diameter', - default = '0.3', - help = 'The diameter (in meters) of the largest hole in the mesh to fill.') - parser.add_argument('--num_min_faces_in_component', dest = 'num_min_faces_in_component', - default = '100', - help = 'Keep only connected mesh components with at least this many faces.') - parser.add_argument('--num_components_to_keep', dest = 'num_components_to_keep', - default = '10', - help = 'How many of the largest connected components ' - + 'of the mesh to keep. Being too aggressive here can result in a mesh ' - + 'with missing parts.') - parser.add_argument('--edge_keep_ratio', dest = 'edge_keep_ratio', - default = '0.2', - help = 'Simply the mesh keeping only this fraction of the original edges.') - parser.add_argument('--merge_maps', dest = 'merge_maps', - default = '', - help = 'Given several output geometry mapper directories, specified ' + \ - 'as a list in quotes, create a merged textured mesh. The input ' + \ - 'bag and sparse map will not be used. Each input geometry mapper run ' + \ - 'can have its own bag and sparse map. The sparse maps must be ' + \ - 'registered to a global coordinate system and co-registered to ' + \ - 'each other, such as when extracted from a larger merged and ' + \ - 'registered map.') - parser.add_argument('--start_step', dest = 'start_step', default = '0', - help = 'Start processing at this step. Useful for resuming work. 
' + \ - 'See the doc for options.') - parser.add_argument('--astrobee_build_dir', dest = 'astrobee_build_dir', - default = astrobee_build_dir, - help = 'The path to the Astrobee build directory.') - parser.add_argument('--localization_options', dest = 'localization_options', - default = '--min_surf_features 400 --max_surf_features 600 --min_surf_threshold 5 --default_surf_threshold 10 --max_surf_threshold 1000 --early_break_landmarks 400 --verbose_localization', - help = 'Options to to use to localize the nav cam images.') - - parser.add_argument('--use_brisk_map', dest = 'use_brisk_map', action = 'store_true', - help = 'Instead of a SURF sparse map made from the same bag that ' + \ - ' needs texturing, use a pre-existing and unrelated BRISK map. ' + - 'This map may be more convenient but less reliable.') - parser.add_argument('--simulated_data', dest = 'simulated_data', action = 'store_true', - help = 'If specified, use data recorded in simulation. ' + \ - 'Then haz and sci camera poses and intrinsics should be recorded ' + \ - 'in the bag file.') - parser.add_argument('--external_mesh', dest = 'external_mesh', default = '', - help = 'Use this mesh to texture the images, rather than creating one ' + \ - 'from depth data in the current bag.') - parser.add_argument('--scicam_to_hazcam_timestamp_offset_override_value', - dest = 'scicam_to_hazcam_timestamp_offset_override_value', - default = '', - help = 'Override the value of scicam_to_hazcam_timestamp_offset ' + \ - 'from the robot config file with this value.') - parser.add_argument('--verbose', dest = 'verbose', action = 'store_true', - help = 'Echo all output in the terminal.') - parser.add_argument('--save_debug_data', dest = 'save_debug_data', action = 'store_true', - help = 'Save many intermediate datasets for debugging.') + exec_path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.dirname(src_path))) + ) + exec_path = os.path.join(exec_path, "devel/lib/geometry_mapper") + + astrobee_build_dir = 
os.environ["ASTROBEE_BUILD_PATH"] + + parser = argparse.ArgumentParser(description="Parameters for the geometry mapper.") + parser.add_argument( + "--ros_bag", + dest="ros_bag", + default="", + help="A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.", + ) + parser.add_argument( + "--sparse_map", + dest="sparse_map", + default="", + help="A registered sparse map made with some of the ROS bag data.", + ) + parser.add_argument( + "--output_dir", + dest="output_dir", + default="", + help="The directory where to write the processed data.", + ) + parser.add_argument( + "--nav_cam_topic", + dest="nav_cam_topic", + default="/hw/cam_nav", + help="The nav cam topic in the bag file.", + ) + parser.add_argument( + "--haz_cam_points_topic", + dest="haz_cam_points_topic", + default="/hw/depth_haz/points", + help="The haz cam point cloud topic in the bag file.", + ) + parser.add_argument( + "--haz_cam_intensity_topic", + dest="haz_cam_intensity_topic", + default="/hw/depth_haz/extended/amplitude_int", + help="The haz cam intensity topic in the bag file.", + ) + parser.add_argument( + "--sci_cam_topic", + dest="sci_cam_topic", + default="/hw/cam_sci", + help="The sci cam topic in the bag file.", + ) + parser.add_argument( + "--camera_type", + dest="camera_type", + default="all", + help="Specify which cameras to process. By default, process all. 
" + + "Options: nav_cam, sci_cam.", + ) + parser.add_argument( + "--start", + dest="start", + default="0.0", + help="How many seconds into the bag to start processing the data.", + ) + parser.add_argument( + "--duration", + dest="duration", + default="-1.0", + help="For how many seconds to do the processing.", + ) + parser.add_argument( + "--sampling_spacing_seconds", + dest="sampling_spacing_seconds", + default="2", + help="Spacing to use, in seconds, between consecutive depth images in " + "the bag that are processed.", + ) + parser.add_argument( + "--dist_between_processed_cams", + dest="dist_between_processed_cams", + default="0.1", + help="Once an image or depth image is processed, how far the camera " + + "should move (in meters) before it should process more data.", + ) + parser.add_argument( + "--sci_cam_timestamps", + dest="sci_cam_timestamps", + default="", + help="Process only these sci cam timestamps (rather than " + + "any in the bag using --dist_between_processed_cams, etc.). " + + "Must be a file with one timestamp per line.", + ) + parser.add_argument( + "--depth_exclude_columns", + dest="depth_exclude_columns", + default="0", + help="Remove this many columns of data from the rectangular " + + "depth image sensor at margins to avoid distortion.", + ) + parser.add_argument( + "--depth_exclude_rows", + dest="depth_exclude_rows", + default="0", + help="Remove this many rows of data from the rectangular " + + "depth image sensor at margins to avoid distortion.", + ) + parser.add_argument( + "--foreshortening_delta", + dest="foreshortening_delta", + default="5.0", + help="A smaller value here will result in holes in depth images " + + "being filled more aggressively but potentially with more artifacts " + + "in foreshortened regions.", + ) + parser.add_argument( + "--median_filters", + dest="median_filters", + default="7 0.1 25 0.1", + help='Given a list "w1 d1 w2 d2 ... 
", remove a depth image point ' + + "if it differs, in the Manhattan norm, from the median of depth points " + + "in the pixel window of size wi centered at it by more than di. This " + + "removes points sticking out for each such i.", + ) + parser.add_argument( + "--depth_hole_fill_diameter", + dest="depth_hole_fill_diameter", + default="30", + help="Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).", + ) + parser.add_argument( + "--max_ray_length", + dest="max_ray_length", + default="2.0", + help="Process haz cam depth image points no further than " + + "this distance from the camera.", + ) + parser.add_argument( + "--voxel_size", + dest="voxel_size", + default="0.01", + help="When fusing the depth point clouds use a voxel of this size, " + + "in meters.", + ) + parser.add_argument( + "--max_iso_times_exposure", + dest="max_iso_times_exposure", + default="5.1", + help="Apply the inverse gamma transform to images, multiply them by " + + "max_iso_times_exposure/ISO/exposure_time to adjust for " + + "lightning differences, then apply the gamma transform back. " + + "This value should be set to the maximum observed " + + "ISO * exposure_time. The default is 5.1. 
Not used with simulated data.", + ) + parser.add_argument( + "--smoothing_time", + dest="smoothing_time", + default="0.00005", + help="A larger value will result in a smoother mesh.", + ) + parser.add_argument( + "--max_num_hole_edges", + dest="max_num_hole_edges", + default="1000", + help="Close holes in the mesh which have no more than this many edges.", + ) + parser.add_argument( + "--max_hole_diameter", + dest="max_hole_diameter", + default="0.3", + help="The diameter (in meters) of the largest hole in the mesh to fill.", + ) + parser.add_argument( + "--num_min_faces_in_component", + dest="num_min_faces_in_component", + default="100", + help="Keep only connected mesh components with at least this many faces.", + ) + parser.add_argument( + "--num_components_to_keep", + dest="num_components_to_keep", + default="10", + help="How many of the largest connected components " + + "of the mesh to keep. Being too aggressive here can result in a mesh " + + "with missing parts.", + ) + parser.add_argument( + "--edge_keep_ratio", + dest="edge_keep_ratio", + default="0.2", + help="Simply the mesh keeping only this fraction of the original edges.", + ) + parser.add_argument( + "--merge_maps", + dest="merge_maps", + default="", + help="Given several output geometry mapper directories, specified " + + "as a list in quotes, create a merged textured mesh. The input " + + "bag and sparse map will not be used. Each input geometry mapper run " + + "can have its own bag and sparse map. The sparse maps must be " + + "registered to a global coordinate system and co-registered to " + + "each other, such as when extracted from a larger merged and " + + "registered map.", + ) + parser.add_argument( + "--start_step", + dest="start_step", + default="0", + help="Start processing at this step. Useful for resuming work. 
" + + "See the doc for options.", + ) + parser.add_argument( + "--astrobee_build_dir", + dest="astrobee_build_dir", + default=astrobee_build_dir, + help="The path to the Astrobee build directory.", + ) + parser.add_argument( + "--localization_options", + dest="localization_options", + default="--min_surf_features 400 --max_surf_features 600 --min_surf_threshold 5 --default_surf_threshold 10 --max_surf_threshold 1000 --early_break_landmarks 400 --verbose_localization", + help="Options to to use to localize the nav cam images.", + ) + + parser.add_argument( + "--use_brisk_map", + dest="use_brisk_map", + action="store_true", + help="Instead of a SURF sparse map made from the same bag that " + + " needs texturing, use a pre-existing and unrelated BRISK map. " + + "This map may be more convenient but less reliable.", + ) + parser.add_argument( + "--simulated_data", + dest="simulated_data", + action="store_true", + help="If specified, use data recorded in simulation. " + + "Then haz and sci camera poses and intrinsics should be recorded " + + "in the bag file.", + ) + parser.add_argument( + "--external_mesh", + dest="external_mesh", + default="", + help="Use this mesh to texture the images, rather than creating one " + + "from depth data in the current bag.", + ) + parser.add_argument( + "--scicam_to_hazcam_timestamp_offset_override_value", + dest="scicam_to_hazcam_timestamp_offset_override_value", + default="", + help="Override the value of scicam_to_hazcam_timestamp_offset " + + "from the robot config file with this value.", + ) + parser.add_argument( + "--verbose", + dest="verbose", + action="store_true", + help="Echo all output in the terminal.", + ) + parser.add_argument( + "--save_debug_data", + dest="save_debug_data", + action="store_true", + help="Save many intermediate datasets for debugging.", + ) args = parser.parse_args() return (src_path, exec_path, args) + def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): # Check if the environemnt was set - 
for var in 'ASTROBEE_RESOURCE_DIR', 'ASTROBEE_CONFIG_DIR', 'ASTROBEE_WORLD', 'ASTROBEE_ROBOT': + for var in ( + "ASTROBEE_RESOURCE_DIR", + "ASTROBEE_CONFIG_DIR", + "ASTROBEE_WORLD", + "ASTROBEE_ROBOT", + ): if var not in os.environ: raise Exception("Must set " + var) - + if not os.path.exists(geometry_mapper_path): raise Exception("Cannot find the geometry mapper: " + geometry_mapper_path) - + if not os.path.exists(batch_tsdf_path): raise Exception("Cannot find batch_tsdf (a voxblox tool): " + batch_tsdf_path) if not os.path.isdir(args.astrobee_build_dir): - raise Exception("Cannot find the astrobee_build directory. " - + "Specify it via --astrobee_build_dir.") - - if args.camera_type == 'nav_cam' and args.simulated_data: - print("Cannot apply nav cam texture with simulated cameras " + \ - "as the gazebo distorted images are not accurate enough.") + raise Exception( + "Cannot find the astrobee_build directory. " + + "Specify it via --astrobee_build_dir." + ) + + if args.camera_type == "nav_cam" and args.simulated_data: + print( + "Cannot apply nav cam texture with simulated cameras " + + "as the gazebo distorted images are not accurate enough." + ) sys.exit(1) - + if args.output_dir == "": raise Exception("The path to the output directory was not specified.") + def mkdir_p(path): if path == "": - return # this can happen when path is os.path.dirname("myfile.txt") + return # this can happen when path is os.path.dirname("myfile.txt") try: os.makedirs(path) except OSError: if os.path.isdir(path): pass else: - raise Exception('Could not make directory ' + path + ' as a file with this name exists.') + raise Exception( + "Could not make directory " + path + " as a file with this name exists." + ) + def setup_outputs(args): mkdir_p(args.output_dir) -def run_cmd(cmd, log_file, verbose = False): - ''' + +def run_cmd(cmd, log_file, verbose=False): + """ Run a command and write the output to a file. In verbose mode also print to screen. 
- ''' + """ print(" ".join(cmd) + "\n") - with open(log_file, 'w', buffering = 0) as f: # replace 'w' with 'wb' for Python 3 + with open(log_file, "w", buffering=0) as f: # replace 'w' with 'wb' for Python 3 f.write(" ".join(cmd) + "\n") process = subprocess.Popen(cmd, stdout=subprocess.PIPE) - for line in iter(process.stdout.readline, ''): # replace '' with b'' for Python 3 + for line in iter( + process.stdout.readline, "" + ): # replace '' with b'' for Python 3 if verbose: sys.stdout.write(line) f.write(line) - # If a certain step failed, do not continue - process.wait() - if (process.returncode != 0): + # If a certain step failed, do not continue + process.wait() + if process.returncode != 0: print("Failed execution of: " + " ".join(cmd)) sys.exit(1) - + + def compute_poses_and_clouds(geometry_mapper_path, args): - ''' + """ Invoke the geometry_mapper tool to compute needed camera poses and clouds. - ''' - - cmd = [geometry_mapper_path, - '--ros_bag', args.ros_bag, - '--sparse_map', args.sparse_map, - '--output_dir', args.output_dir, - '--nav_cam_topic', args.nav_cam_topic, - '--haz_cam_points_topic', args.haz_cam_points_topic, - '--haz_cam_intensity_topic', args.haz_cam_intensity_topic, - '--sci_cam_topic', args.sci_cam_topic, - '--camera_type', args.camera_type, - '--start', args.start, - '--duration', args.duration, - '--sampling_spacing_seconds', args.sampling_spacing_seconds, - '--dist_between_processed_cams', args.dist_between_processed_cams, - '--max_iso_times_exposure', args.max_iso_times_exposure, - '--depth_exclude_columns', args.depth_exclude_columns, - '--depth_exclude_rows', args.depth_exclude_rows, - '--foreshortening_delta', args.foreshortening_delta, - '--depth_hole_fill_diameter', args.depth_hole_fill_diameter, - '--median_filters', args.median_filters, - ] + args.localization_options.split(' ') + """ + + cmd = [ + geometry_mapper_path, + "--ros_bag", + args.ros_bag, + "--sparse_map", + args.sparse_map, + "--output_dir", + args.output_dir, + 
"--nav_cam_topic", + args.nav_cam_topic, + "--haz_cam_points_topic", + args.haz_cam_points_topic, + "--haz_cam_intensity_topic", + args.haz_cam_intensity_topic, + "--sci_cam_topic", + args.sci_cam_topic, + "--camera_type", + args.camera_type, + "--start", + args.start, + "--duration", + args.duration, + "--sampling_spacing_seconds", + args.sampling_spacing_seconds, + "--dist_between_processed_cams", + args.dist_between_processed_cams, + "--max_iso_times_exposure", + args.max_iso_times_exposure, + "--depth_exclude_columns", + args.depth_exclude_columns, + "--depth_exclude_rows", + args.depth_exclude_rows, + "--foreshortening_delta", + args.foreshortening_delta, + "--depth_hole_fill_diameter", + args.depth_hole_fill_diameter, + "--median_filters", + args.median_filters, + ] + args.localization_options.split(" ") if args.sci_cam_timestamps != "": - cmd += ['--sci_cam_timestamps', args.sci_cam_timestamps] - + cmd += ["--sci_cam_timestamps", args.sci_cam_timestamps] + if args.use_brisk_map: - cmd += ['--use_brisk_map'] + cmd += ["--use_brisk_map"] if args.simulated_data: - cmd += ['--simulated_data'] + cmd += ["--simulated_data"] if args.save_debug_data: - cmd += ['--save_debug_data'] + cmd += ["--save_debug_data"] if args.external_mesh != "": - cmd += ['--external_mesh', args.external_mesh] - + cmd += ["--external_mesh", args.external_mesh] + if args.scicam_to_hazcam_timestamp_offset_override_value != "": - cmd += ['--scicam_to_hazcam_timestamp_offset_override_value', - args.scicam_to_hazcam_timestamp_offset_override_value] + cmd += [ + "--scicam_to_hazcam_timestamp_offset_override_value", + args.scicam_to_hazcam_timestamp_offset_override_value, + ] + + log_file = os.path.join(args.output_dir, "geometry_mapper_log.txt") + print( + "Compute camera poses and extract data from the bag. 
Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + - log_file = os.path.join(args.output_dir, 'geometry_mapper_log.txt') - print('Compute camera poses and extract data from the bag. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) - def fuse_clouds(batch_tsdf_path, mesh, args): - ''' + """ Invoke the voxblox batch_tsdf tool to fuse the depth images. - ''' - - haz_cam_index = os.path.join(args.output_dir, 'haz_cam_index.txt') + """ + + haz_cam_index = os.path.join(args.output_dir, "haz_cam_index.txt") cmd = [batch_tsdf_path, haz_cam_index, mesh, args.max_ray_length, args.voxel_size] - log_file = os.path.join(args.output_dir, 'voxblox_log.txt') - print('Fusing the depth images with voxblox. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + log_file = os.path.join(args.output_dir, "voxblox_log.txt") + print( + "Fusing the depth images with voxblox. Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + def smoothe_mesh(input_mesh, output_mesh, args, attempt): - smoothe_mesh_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/smoothe_mesh') + smoothe_mesh_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/smoothe_mesh" + ) if not os.path.exists(smoothe_mesh_path): raise Exception("Cannot find the smoothing tool:" + smoothe_mesh_path) - num_iterations = '1' - smoothe_boundary = '1' - cmd = [smoothe_mesh_path, num_iterations, args.smoothing_time, smoothe_boundary, - input_mesh, output_mesh] + num_iterations = "1" + smoothe_boundary = "1" + cmd = [ + smoothe_mesh_path, + num_iterations, + args.smoothing_time, + smoothe_boundary, + input_mesh, + output_mesh, + ] + + log_file = os.path.join( + args.output_dir, "smooth_mesh_attempt_" + str(attempt) + "_log.txt" + ) + print("Smoothing the mesh. 
Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'smooth_mesh_attempt_' \ - + str(attempt) + '_log.txt') - print('Smoothing the mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def fill_holes_in_mesh(input_mesh, output_mesh, args, attempt): - fill_holes_path = os.path.join(os.environ['HOME'], 'projects/cgal_tools/fill_holes') + fill_holes_path = os.path.join(os.environ["HOME"], "projects/cgal_tools/fill_holes") if not os.path.exists(fill_holes_path): raise Exception("Cannot find the hole-filling tool:" + fill_holes_path) - cmd = [fill_holes_path, args.max_hole_diameter, - args.max_num_hole_edges, input_mesh, output_mesh] + cmd = [ + fill_holes_path, + args.max_hole_diameter, + args.max_num_hole_edges, + input_mesh, + output_mesh, + ] + + log_file = os.path.join( + args.output_dir, "fill_holes_attempt_" + str(attempt) + "_log.txt" + ) + print("Hole-filling the mesh. Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'fill_holes_attempt_' \ - + str(attempt) + '_log.txt') - print('Hole-filling the mesh. 
Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def rm_connected_components(input_mesh, output_mesh, args): - rm_connected_components_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/rm_connected_components') + rm_connected_components_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/rm_connected_components" + ) if not os.path.exists(rm_connected_components_path): raise Exception("Cannot find the tool:" + rm_connected_components_path) - cmd = [rm_connected_components_path, args.num_components_to_keep, - args.num_min_faces_in_component, input_mesh, output_mesh] + cmd = [ + rm_connected_components_path, + args.num_components_to_keep, + args.num_min_faces_in_component, + input_mesh, + output_mesh, + ] + + log_file = os.path.join(args.output_dir, "rm_connected_components_log.txt") + print( + "Removing small connected components from mesh. Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'rm_connected_components_log.txt') - print('Removing small connected components from mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def simplify_mesh(input_mesh, output_mesh, args): - simplify_mesh_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/simplify_mesh') + simplify_mesh_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/simplify_mesh" + ) if not os.path.exists(simplify_mesh_path): raise Exception("Cannot find the tool:" + simplify_mesh_path) cmd = [simplify_mesh_path, args.edge_keep_ratio, input_mesh, output_mesh] - log_file = os.path.join(args.output_dir, 'simplify_mesh_log.txt') - print('Simplifying the mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + log_file = os.path.join(args.output_dir, "simplify_mesh_log.txt") + print("Simplifying the mesh. 
Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) -def undistort_images(args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale): - - undistort_image_path = os.path.join(args.astrobee_build_dir, - 'devel/lib/camera/undistort_image') + +def undistort_images( + args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale +): + + undistort_image_path = os.path.join( + args.astrobee_build_dir, "devel/lib/camera/undistort_image" + ) if not os.path.exists(undistort_image_path): raise Exception("Cannot find the undistort_image tool: " + undistort_image_path) - - cmd = [undistort_image_path, '--undistorted_crop_win', undist_crop_win, - '--save_bgr', '--robot_camera', cam_type, '-image_list', dist_image_list, - '--scale', str(scale), '--output_directory', undist_dir] - log_file = os.path.join(args.output_dir, 'undist_' + cam_type + '_log.txt') - print('Undistorting ' + cam_type + ' images. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + cmd = [ + undistort_image_path, + "--undistorted_crop_win", + undist_crop_win, + "--save_bgr", + "--robot_camera", + cam_type, + "-image_list", + dist_image_list, + "--scale", + str(scale), + "--output_directory", + undist_dir, + ] + + log_file = os.path.join(args.output_dir, "undist_" + cam_type + "_log.txt") + print( + "Undistorting " + cam_type + " images. 
Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + def create_texrecon_cameras(args, src_path, undist_dir, cam_type): - cam_to_texrecon_path = os.path.join(src_path, 'cameras_to_texrecon.py') + cam_to_texrecon_path = os.path.join(src_path, "cameras_to_texrecon.py") if not os.path.exists(cam_to_texrecon_path): raise Exception("Cannot find: " + cam_to_texrecon_path) - cmd = ['python', cam_to_texrecon_path, '--camera_dir', args.output_dir, - '--undistorted_image_dir', undist_dir, '--camera_type', cam_type] - - log_file = os.path.join(args.output_dir, 'setup_texrecon_' + cam_type + '_log.txt') - print('Preparing texrecon cameras for ' + cam_type + '. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + cmd = [ + "python", + cam_to_texrecon_path, + "--camera_dir", + args.output_dir, + "--undistorted_image_dir", + undist_dir, + "--camera_type", + cam_type, + ] + + log_file = os.path.join(args.output_dir, "setup_texrecon_" + cam_type + "_log.txt") + print( + "Preparing texrecon cameras for " + + cam_type + + ". 
Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + def run_texrecon(args, src_path, mesh, undist_dir, cam_type): # That is one long path texrecon_path = os.path.dirname(os.path.dirname(os.path.dirname(exec_path))) - texrecon_path = os.path.join(texrecon_path, - 'build/geometry_mapper/texrecon/src/texrecon-build', - 'apps/texrecon/texrecon') + texrecon_path = os.path.join( + texrecon_path, + "build/geometry_mapper/texrecon/src/texrecon-build", + "apps/texrecon/texrecon", + ) if not os.path.exists(texrecon_path): raise Exception("Cannot find: " + texrecon_path) - - texrecon_dir=os.path.join(args.output_dir, cam_type + '_texture/run') + + texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/run") parent_dir = os.path.dirname(texrecon_dir) if os.path.isdir(parent_dir): # Wipe the existing directory print("Removing recursively old directory: " + parent_dir) shutil.rmtree(parent_dir) mkdir_p(texrecon_dir) - - cmd = [texrecon_path, undist_dir, mesh, texrecon_dir, - '-o', 'gauss_clamping', '-d', 'view_dir_dot_face_dir', - '--keep_unseen_faces'] - - log_file = os.path.join(args.output_dir, 'texrecon_' + cam_type + '_log.txt') - print('Running texrecon for ' + cam_type + '. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) - - textured_mesh = texrecon_dir + '.obj' + + cmd = [ + texrecon_path, + undist_dir, + mesh, + texrecon_dir, + "-o", + "gauss_clamping", + "-d", + "view_dir_dot_face_dir", + "--keep_unseen_faces", + ] + + log_file = os.path.join(args.output_dir, "texrecon_" + cam_type + "_log.txt") + print( + "Running texrecon for " + cam_type + ". Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + + textured_mesh = texrecon_dir + ".obj" return textured_mesh + # Find the ratio of a sci cam image's width and the width from the camera config file. 
def find_sci_cam_scale(image_file): # width from the imag file img = cv2.imread(image_file) image_width = img.shape[1] - + # Width from the config file - config_file = os.environ['ASTROBEE_CONFIG_DIR'] + '/cameras.config' - - with open(config_file, 'r') as file: + config_file = os.environ["ASTROBEE_CONFIG_DIR"] + "/cameras.config" + + with open(config_file, "r") as file: text = file.read() - m = re.match('^.*?sci_cam\s*=\s*\{\s*width\s*=\s*(\d+)', text, re.DOTALL) + m = re.match("^.*?sci_cam\s*=\s*\{\s*width\s*=\s*(\d+)", text, re.DOTALL) if not m: print("Could not parse sci cam width from: " + config_file) sys.exit(1) config_width = m.group(1) - - return float(image_width)/float(config_width) + + return float(image_width) / float(config_width) + def texture_mesh(src_path, cam_type, mesh, args): - dist_image_list = os.path.join(args.output_dir, cam_type + '_index.txt') + dist_image_list = os.path.join(args.output_dir, cam_type + "_index.txt") with open(dist_image_list) as f: image_files = f.readlines() if len(image_files) == 0: @@ -430,26 +690,28 @@ def texture_mesh(src_path, cam_type, mesh, args): print("Found no images for: " + cam_type) return "" - dist_dir = os.path.join(args.output_dir, 'distorted_' + cam_type) - undist_dir = os.path.join(args.output_dir, 'undistorted_' + cam_type) + dist_dir = os.path.join(args.output_dir, "distorted_" + cam_type) + undist_dir = os.path.join(args.output_dir, "undistorted_" + cam_type) if not args.simulated_data: - + scale = 1.0 - undist_crop_win = '' - + undist_crop_win = "" + if cam_type == "nav_cam": scale = 1.0 - undist_crop_win = '1100 776' - elif cam_type == 'sci_cam': + undist_crop_win = "1100 776" + elif cam_type == "sci_cam": # Deal with the fact that the robot config file may assume # a different resolution than what is in the bag scale = find_sci_cam_scale(image_files[0].rstrip()) - crop_wd = 2*int(round(625.0 * scale)) # an even number - crop_ht = 2*int(round(500.0 * scale)) # an even number - undist_crop_win = 
str(crop_wd) + ' ' + str(crop_ht) - - undistort_images(args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale) + crop_wd = 2 * int(round(625.0 * scale)) # an even number + crop_ht = 2 * int(round(500.0 * scale)) # an even number + undist_crop_win = str(crop_wd) + " " + str(crop_ht) + + undistort_images( + args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale + ) create_texrecon_cameras(args, src_path, undist_dir, cam_type) textured_mesh = run_texrecon(args, src_path, mesh, undist_dir, cam_type) else: @@ -459,6 +721,7 @@ def texture_mesh(src_path, cam_type, mesh, args): return textured_mesh + def copy_dir(src, dst, symlinks=False, ignore=None): for item in os.listdir(src): s = os.path.join(src, item) @@ -467,11 +730,12 @@ def copy_dir(src, dst, symlinks=False, ignore=None): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) - + + def merge_poses_and_clouds(args): - ''' + """ Merge individual reconstructions from previous invocations of this tool. - ''' + """ input_dirs = args.merge_maps.split() if len(input_dirs) == 0: @@ -487,9 +751,9 @@ def merge_poses_and_clouds(args): items = os.listdir(input_dir) for item in items: - if item.endswith('_log.txt'): + if item.endswith("_log.txt"): continue - + # This copy is awkward because shutil.copytree cannot copy into an existing directory s = os.path.join(input_dir, item) d = os.path.join(output_dir, item) @@ -510,11 +774,11 @@ def merge_poses_and_clouds(args): d2 = os.path.join(d, item2) if not os.path.isdir(s2): shutil.copy2(s2, d2) - + # merge the index files index_files = os.listdir(output_dir) for index_file in index_files: - if not index_file.endswith('index.txt'): + if not index_file.endswith("index.txt"): continue # Merge the index files by concatenating them @@ -528,14 +792,17 @@ def merge_poses_and_clouds(args): for line in f_in.readlines(): line = line.replace(input_dir, output_dir) f_out.write(line) - -if __name__ == '__main__': - + + +if __name__ == "__main__": + 
(src_path, exec_path, args) = process_args(sys.argv) - geometry_mapper_path = os.path.join(exec_path, 'geometry_mapper') - batch_tsdf_path = os.path.join(os.environ['HOME'], 'catkin_ws/devel/lib/voxblox_ros/batch_tsdf') - + geometry_mapper_path = os.path.join(exec_path, "geometry_mapper") + batch_tsdf_path = os.path.join( + os.environ["HOME"], "catkin_ws/devel/lib/voxblox_ros/batch_tsdf" + ) + sanity_checks(geometry_mapper_path, batch_tsdf_path, args) setup_outputs(args) @@ -548,67 +815,68 @@ def merge_poses_and_clouds(args): else: merge_poses_and_clouds(args) - fused_mesh = os.path.join(args.output_dir, 'fused_mesh.ply') + fused_mesh = os.path.join(args.output_dir, "fused_mesh.ply") if start_step <= 1 and args.external_mesh == "": fuse_clouds(batch_tsdf_path, fused_mesh, args) - + # Smothing must happen before hole-filling, to remove weird # artifacts - smooth_mesh = os.path.join(args.output_dir, 'smooth_mesh.ply') + smooth_mesh = os.path.join(args.output_dir, "smooth_mesh.ply") if start_step <= 2 and args.external_mesh == "": attempt = 1 smoothe_mesh(fused_mesh, smooth_mesh, args, attempt) # Fill holes - hole_filled_mesh = os.path.join(args.output_dir, 'hole_filled_mesh.ply') + hole_filled_mesh = os.path.join(args.output_dir, "hole_filled_mesh.ply") if start_step <= 3 and args.external_mesh == "": attempt = 1 fill_holes_in_mesh(smooth_mesh, hole_filled_mesh, args, attempt) # Rm small connected components - clean_mesh = os.path.join(args.output_dir, 'clean_mesh.ply') + clean_mesh = os.path.join(args.output_dir, "clean_mesh.ply") if start_step <= 4 and args.external_mesh == "": rm_connected_components(hole_filled_mesh, clean_mesh, args) - # Smoothe again - smooth_mesh2 = os.path.join(args.output_dir, 'smooth_mesh2.ply') + # Smoothe again + smooth_mesh2 = os.path.join(args.output_dir, "smooth_mesh2.ply") if start_step <= 5 and args.external_mesh == "": attempt = 2 smoothe_mesh(clean_mesh, smooth_mesh2, args, attempt) - + # Fill holes again. 
That is necessary, and should happen after smoothing. # Mesh cleaning creates small holes and they can't be cleaned well # without some more smoothing like above. - hole_filled_mesh2 = os.path.join(args.output_dir, 'hole_filled_mesh2.ply') + hole_filled_mesh2 = os.path.join(args.output_dir, "hole_filled_mesh2.ply") if start_step <= 6 and args.external_mesh == "": attempt = 2 fill_holes_in_mesh(smooth_mesh2, hole_filled_mesh2, args, attempt) # Smoothe again as filling holes can make the mesh a little rough - smooth_mesh3 = os.path.join(args.output_dir, 'smooth_mesh3.ply') + smooth_mesh3 = os.path.join(args.output_dir, "smooth_mesh3.ply") if start_step <= 7 and args.external_mesh == "": attempt = 3 smoothe_mesh(hole_filled_mesh2, smooth_mesh3, args, attempt) # Simplify the mesh - simplified_mesh = os.path.join(args.output_dir, 'simplified_mesh.ply') + simplified_mesh = os.path.join(args.output_dir, "simplified_mesh.ply") if start_step <= 8 and args.external_mesh == "": simplify_mesh(smooth_mesh3, simplified_mesh, args) if args.external_mesh != "": # So that can texture on top of it simplified_mesh = args.external_mesh - - nav_textured_mesh = '' - sci_textured_mesh = '' + + nav_textured_mesh = "" + sci_textured_mesh = "" if start_step <= 9: # For simulated data texturing nav cam images is not supported - if (not args.simulated_data) and (args.camera_type == 'all' or \ - args.camera_type == 'nav_cam'): - nav_textured_mesh = texture_mesh(src_path, 'nav_cam', simplified_mesh, args) + if (not args.simulated_data) and ( + args.camera_type == "all" or args.camera_type == "nav_cam" + ): + nav_textured_mesh = texture_mesh(src_path, "nav_cam", simplified_mesh, args) - if args.camera_type == 'all' or args.camera_type == 'sci_cam': - sci_textured_mesh = texture_mesh(src_path, 'sci_cam', simplified_mesh, args) + if args.camera_type == "all" or args.camera_type == "sci_cam": + sci_textured_mesh = texture_mesh(src_path, "sci_cam", simplified_mesh, args) if args.external_mesh == "": 
print("Fused mesh: " + fused_mesh) @@ -621,7 +889,7 @@ def merge_poses_and_clouds(args): print("Simplified mesh: " + simplified_mesh) else: print("External mesh: " + args.external_mesh) - + if nav_textured_mesh != "": print("Nav cam textured mesh: " + nav_textured_mesh) diff --git a/img_analysis/resources/analyse_img.py b/img_analysis/resources/analyse_img.py deleted file mode 100644 index 3336ae4c..00000000 --- a/img_analysis/resources/analyse_img.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2021, United States Government, as represented by the -# Administrator of the National Aeronautics and Space Administration. -# -# All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt -from PIL import Image - -import torchvision.transforms as transforms - -# Parameters -image = Image.open("./analysed_images/img3.jpg") - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) -image_tensor = test_transforms(image).float() -image_tensor = image_tensor.unsqueeze_(0) - -print(image_tensor[0, :5]) - - -model = models.densenet121(pretrained=True) -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) -model.load_state_dict(torch.load('model_cnn.pt')) -model.eval() - -output = model(image_tensor) -print(output) - -if output is 0: - print("Vent is obstructed") -else if output is 1: - print("Vent is free") -else if output is 2: - print("Could not identify a vent") - -index = output.data.numpy().argmax() -print(index) - -# 0 : -0.3883 -2.1018 -1.6115 -# 1 : -0.3694 -1.6222 -2.1950 -# 2 : -0.029524 -3.546348 -8.242915 -# 3 : -0.9386 -0.5036 -5.4123 \ No newline at end of file diff --git a/img_analysis/resources/vent_cnn.py b/img_analysis/resources/vent_cnn.py deleted file mode 100644 index 6de3063a..00000000 --- a/img_analysis/resources/vent_cnn.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) 2021, United States Government, as represented by the -# Administrator of the National Aeronautics and Space Administration. -# -# All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with the -# License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import matplotlib.pyplot as plt - -import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt - -# Parameters -data_dir = str(Path.home()) + '/datasets/vent_analysis' # specify data path -classes = ['free', 'obstacle', 'unknown'] # specify the image classes -num_epochs = 30 # number of epochs to train -model_name = 'model_cnn.pt' # saved model name -trace_model_name = "traced_model_cnn.pt" # saved traced model name - -# Define transforms for the training data and testing data -train_transforms = transforms.Compose([transforms.Resize([256, 256]), - transforms.RandomRotation(30), - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -# Pass transforms in here, then run the next cell to see how the transforms look -train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) -test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) - -trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) -testloader = torch.utils.data.DataLoader(test_data, batch_size=64) - -# Visualize some of the data----------------------- -# helper function to un-normalize and 
display an image -def imshow(img): - img = img / 2 + 0.5 # unnormalize - plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image -# obtain one batch of training images -dataiter = iter(trainloader) -images, labels = dataiter.next() -images = images.numpy() # convert images to numpy for display -# plot the images in the batch, along with the corresponding labels -fig = plt.figure(figsize=(25, 4)) -# display 20 images -for idx in np.arange(20): - ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) - imshow(images[idx]) - ax.set_title(classes[labels[idx]]) - -# Use GPU if it's available -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -model = models.densenet121(pretrained=True) - -# Freeze parameters so we don't backprop through them -for param in model.parameters(): - param.requires_grad = False - -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) - -criterion = nn.NLLLoss() - -# Only train the classifier parameters, feature parameters are frozen -optimizer = optim.Adam(model.classifier.parameters(), lr=0.003) - -model.to(device); - -epochs = num_epochs -steps = 0 -running_loss = 0 -print_every = 5 -test_loss_min = np.Inf -for epoch in range(epochs): - for inputs, labels in trainloader: - steps += 1 - # Move input and label tensors to the default device - inputs, labels = inputs.to(device), labels.to(device) - - optimizer.zero_grad() - - logps = model.forward(inputs) - loss = criterion(logps, labels) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - test_loss = 0 - accuracy = 0 - model.eval() - with torch.no_grad(): - for inputs, labels in testloader: - inputs, labels = inputs.to(device), labels.to(device) - logps = model.forward(inputs) - batch_loss = criterion(logps, labels) - - test_loss += batch_loss.item() - - # Calculate accuracy - ps = torch.exp(logps) - top_p, top_class = 
ps.topk(1, dim=1) - equals = top_class == labels.view(*top_class.shape) - accuracy += torch.mean(equals.type(torch.FloatTensor)).item() - - print("Epoch ", epoch + 1, "/", epochs, ".. " - "Train loss: ", running_loss / print_every, ".. " - "Test loss: ", test_loss / len(testloader), ".. " - "Test accuracy: ", accuracy / len(testloader), "") - running_loss = 0 - - # save model if validation loss has decreased - if test_loss <= test_loss_min: - torch.save(model.state_dict(), model_name) - test_loss_min = test_loss - - model.to("cpu"); - # An example input you would normally provide to your model's forward() method. - example = torch.rand(1, 3, 224, 224) - # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. - traced_script_module = torch.jit.trace(model, example) - output = traced_script_module(torch.ones(1, 3, 224, 224)) - print(output) - traced_script_module.save(trace_model_name) - model.to("cuda"); - - model.train() diff --git a/img_analysis/scripts/analyse_img.py b/img_analysis/scripts/analyse_img.py index 3336ae4c..ae9a58aa 100644 --- a/img_analysis/scripts/analyse_img.py +++ b/img_analysis/scripts/analyse_img.py @@ -16,25 +16,27 @@ # License for the specific language governing permissions and limitations # under the License. 
-import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np from pathlib import Path -import matplotlib.pyplot as plt -from PIL import Image +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F import torchvision.transforms as transforms +from PIL import Image +from torch import nn, optim +from torchvision import datasets, models, transforms # Parameters image = Image.open("./analysed_images/img3.jpg") -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) +test_transforms = transforms.Compose( + [ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) image_tensor = test_transforms(image).float() image_tensor = image_tensor.unsqueeze_(0) @@ -42,23 +44,25 @@ model = models.densenet121(pretrained=True) -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) -model.load_state_dict(torch.load('model_cnn.pt')) +model.classifier = nn.Sequential( + nn.Linear(1024, 256), + nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(256, 3), + nn.LogSoftmax(dim=1), +) +model.load_state_dict(torch.load("model_cnn.pt")) model.eval() output = model(image_tensor) print(output) if output is 0: - print("Vent is obstructed") -else if output is 1: - print("Vent is free") -else if output is 2: - print("Could not identify a vent") + print("Vent is obstructed") +elif output is 1: + print("Vent is free") +elif output is 2: + print("Could not identify a vent") index = output.data.numpy().argmax() print(index) @@ -66,4 +70,4 @@ # 0 : -0.3883 -2.1018 -1.6115 # 1 : -0.3694 -1.6222 -2.1950 # 2 : -0.029524 -3.546348 -8.242915 -# 3 : -0.9386 -0.5036 -5.4123 \ No newline at end of file +# 3 : -0.9386 
-0.5036 -5.4123 diff --git a/img_analysis/scripts/train_cnn_vent.py b/img_analysis/scripts/train_cnn_vent.py index 6de3063a..d04a222e 100644 --- a/img_analysis/scripts/train_cnn_vent.py +++ b/img_analysis/scripts/train_cnn_vent.py @@ -16,41 +16,45 @@ # License for the specific language governing permissions and limitations # under the License. -import matplotlib.pyplot as plt +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np import torch -from torch import nn -from torch import optim import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt +from torch import nn, optim +from torchvision import datasets, models, transforms # Parameters -data_dir = str(Path.home()) + '/datasets/vent_analysis' # specify data path -classes = ['free', 'obstacle', 'unknown'] # specify the image classes -num_epochs = 30 # number of epochs to train -model_name = 'model_cnn.pt' # saved model name -trace_model_name = "traced_model_cnn.pt" # saved traced model name +data_dir = str(Path.home()) + "/datasets/vent_analysis" # specify data path +classes = ["free", "obstacle", "unknown"] # specify the image classes +num_epochs = 30 # number of epochs to train +model_name = "model_cnn.pt" # saved model name +trace_model_name = "traced_model_cnn.pt" # saved traced model name # Define transforms for the training data and testing data -train_transforms = transforms.Compose([transforms.Resize([256, 256]), - transforms.RandomRotation(30), - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) +train_transforms = transforms.Compose( + [ + transforms.Resize([256, 256]), + 
transforms.RandomRotation(30), + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) + +test_transforms = transforms.Compose( + [ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) # Pass transforms in here, then run the next cell to see how the transforms look -train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) -test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) +train_data = datasets.ImageFolder(data_dir + "/train", transform=train_transforms) +test_data = datasets.ImageFolder(data_dir + "/test", transform=test_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) testloader = torch.utils.data.DataLoader(test_data, batch_size=64) @@ -60,15 +64,17 @@ def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image + + # obtain one batch of training images dataiter = iter(trainloader) images, labels = dataiter.next() -images = images.numpy() # convert images to numpy for display +images = images.numpy() # convert images to numpy for display # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) # display 20 images for idx in np.arange(20): - ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) + ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) @@ -81,18 +87,20 @@ def imshow(img): for param in model.parameters(): param.requires_grad = False -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) +model.classifier = nn.Sequential( + nn.Linear(1024, 256), + nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(256, 3), + 
nn.LogSoftmax(dim=1), +) criterion = nn.NLLLoss() # Only train the classifier parameters, feature parameters are frozen optimizer = optim.Adam(model.classifier.parameters(), lr=0.003) -model.to(device); +model.to(device) epochs = num_epochs steps = 0 @@ -132,10 +140,19 @@ def imshow(img): equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)).item() - print("Epoch ", epoch + 1, "/", epochs, ".. " - "Train loss: ", running_loss / print_every, ".. " - "Test loss: ", test_loss / len(testloader), ".. " - "Test accuracy: ", accuracy / len(testloader), "") + print( + "Epoch ", + epoch + 1, + "/", + epochs, + ".. " "Train loss: ", + running_loss / print_every, + ".. " "Test loss: ", + test_loss / len(testloader), + ".. " "Test accuracy: ", + accuracy / len(testloader), + "", + ) running_loss = 0 # save model if validation loss has decreased @@ -143,7 +160,7 @@ def imshow(img): torch.save(model.state_dict(), model_name) test_loss_min = test_loss - model.to("cpu"); + model.to("cpu") # An example input you would normally provide to your model's forward() method. example = torch.rand(1, 3, 224, 224) # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. 
@@ -151,6 +168,6 @@ def imshow(img): output = traced_script_module(torch.ones(1, 3, 224, 224)) print(output) traced_script_module.save(trace_model_name) - model.to("cuda"); + model.to("cuda") model.train() diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index 0d2b4661..dd6e8541 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -126,6 +126,8 @@ if [ $vm == 0 ]; then docker build ${astrobee_source}/ \ -f ${astrobee_source}/scripts/docker/astrobee.Dockerfile \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ + --build-arg ROS_VERSION=${ROS_VERSION} \ + --build-arg PYTHON=${PYTHON} \ -t astrobee/astrobee:latest-ubuntu${UBUNTU_VERSION} # Build ISAAC diff --git a/scripts/git/cpplint_repo.py b/scripts/git/cpplint_repo.py index bf43bbe3..7ca56870 100755 --- a/scripts/git/cpplint_repo.py +++ b/scripts/git/cpplint_repo.py @@ -24,6 +24,7 @@ num_errors = 0 + def get_cpplint_path(): return os.path.dirname(os.path.realpath(__file__)) + "/cpplint.py" @@ -31,6 +32,7 @@ def get_cpplint_path(): def get_repo_path(): return os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../..") + def run_cpplint(filename, cpplint_path): cpplint = importlib.machinery.SourceFileLoader( "cpplint", cpplint_path @@ -47,10 +49,12 @@ def run_cpplint(filename, cpplint_path): cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level) return cpplint.output + def print_objection(): print("Code formatting errors were found.") print("==================================") + def main(): num_errors = 0 diff --git a/scripts/setup/build_install_dependencies.sh b/scripts/setup/build_install_dependencies.sh index 4fd0b0e6..22a40ff5 100755 --- a/scripts/setup/build_install_dependencies.sh +++ b/scripts/setup/build_install_dependencies.sh @@ -23,7 +23,11 @@ DEP_LOC=$(dirname "$(readlink -f "$0")")/dependencies -#sudo apt-get install -y +sudo apt-get install -y libgtest-dev +cd /usr/src/gtest +sudo cmake CMakeLists.txt +sudo make +sudo cp *.a /usr/lib cd ${DEP_LOC} 
./build_install_gp.sh || exit 1 From 39fa8b85643a82e0f56dbdd75197cc781ff8ee64 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Wed, 1 Dec 2021 12:48:52 -0800 Subject: [PATCH 02/40] removing slashes --- .github/workflows/ci_push.yml | 60 ++++++++++++++++---------------- .github/workflows/ci_release.yml | 60 ++++++++++++++++---------------- 2 files changed, 60 insertions(+), 60 deletions(-) diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 9ff51af1..832fc89e 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -17,19 +17,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04 - name: Log in to registry @@ -51,19 +51,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04 - name: Log in to registry @@ -85,19 +85,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04 - name: Log in to registry diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 547c8e24..05820f7d 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -17,19 +17,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=isaac \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu16.04 - name: Log in to registry @@ -54,19 +54,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=isaac \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu18.04 - name: Log in to registry @@ -91,19 +91,19 @@ jobs: run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=isaac \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu20.04 - name: Log in to registry From 293483cd9c7864154b75d64ca4a8d92accca6d6d Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Wed, 1 Dec 2021 15:30:36 -0800 Subject: [PATCH 03/40] fixing tag name --- .github/workflows/ci_push.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 832fc89e..9ba53e2c 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -37,7 +37,7 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04; fi; build-bionic: @@ -71,7 +71,7 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04; fi; build-focal: @@ -105,5 +105,5 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04; fi; if [ "${{ 
github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04; fi; From 2b072ccbe0a80518583c2a2fcd0f76a17d84e446 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 2 Dec 2021 18:42:42 -0800 Subject: [PATCH 04/40] dense_mapper: Add reliability weights and happly.h --- dense_map/geometry_mapper/include/happly.h | 2013 +++++++++++++++++ dense_map/geometry_mapper/readme.md | 28 +- .../geometry_mapper/tools/camera_refiner.cc | 4 +- .../geometry_mapper/tools/geometry_mapper.cc | 226 +- .../geometry_mapper/tools/geometry_mapper.py | 6 +- licenses.csv | 3 +- scripts/git/cpplint_repo.py | 2 +- scripts/git/pre-commit.linter_cpp | 1 + scripts/git/pre-commit.linter_python | 32 +- 9 files changed, 2200 insertions(+), 115 deletions(-) create mode 100644 dense_map/geometry_mapper/include/happly.h mode change 100755 => 100644 scripts/git/cpplint_repo.py diff --git a/dense_map/geometry_mapper/include/happly.h b/dense_map/geometry_mapper/include/happly.h new file mode 100644 index 00000000..01d84502 --- /dev/null +++ b/dense_map/geometry_mapper/include/happly.h @@ -0,0 +1,2013 @@ +#pragma once + +/* A header-only implementation of the .ply file format. + * https://github.com/nmwsharp/happly + * By Nicholas Sharp - nsharp@cs.cmu.edu + * + * Version 2, July 20, 2019 + */ + +/* +MIT License + +Copyright (c) 2018 Nick Sharp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + + +// clang-format off +/* + + === Changelog === + + Significant changes to the file recorded here. + + - Version 5 (Aug 22, 2020) Minor: skip blank lines before properties in ASCII files + - Version 4 (Sep 11, 2019) Change internal list format to be flat. Other small perf fixes and cleanup. + - Version 3 (Aug 1, 2019) Add support for big endian and obj_info + - Version 2 (July 20, 2019) Catch exceptions by const reference. + - Version 1 (undated) Initial version. Unnamed changes before version numbering. + +*/ +// clang-format on + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// General namespace wrapping all Happly things. +namespace happly { + +// Enum specifying binary or ASCII filetypes. Binary can be little-endian +// (default) or big endian. 
+enum class DataFormat { ASCII, Binary, BinaryBigEndian }; + +// Type name strings +// clang-format off +template std::string typeName() { return "unknown"; } +template<> inline std::string typeName() { return "char"; } +template<> inline std::string typeName() { return "uchar"; } +template<> inline std::string typeName() { return "short"; } +template<> inline std::string typeName() { return "ushort"; } +template<> inline std::string typeName() { return "int"; } +template<> inline std::string typeName() { return "uint"; } +template<> inline std::string typeName() { return "float"; } +template<> inline std::string typeName() { return "double"; } + +// Template hackery that makes getProperty() and friends pretty while automatically picking up smaller types +namespace { + +// A pointer for the equivalent/smaller equivalent of a type (eg. when a double is requested a float works too, etc) +// long int is intentionally absent to avoid platform confusion +template struct TypeChain { bool hasChildType = false; typedef T type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int32_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int16_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int8_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint32_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint16_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint8_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef float type; }; + +template struct CanonicalName { typedef T type; }; +template <> struct CanonicalName { typedef int8_t type; }; +template <> struct CanonicalName { typedef uint8_t type; }; +template <> struct CanonicalName { typedef std::conditional::type, int>::value, uint32_t, uint64_t>::type type; }; + +// Used to change behavior of >> for 8bit ints, which does not do what we want. 
+template struct SerializeType { typedef T type; }; +template <> struct SerializeType { typedef int32_t type; }; +template <> struct SerializeType< int8_t> { typedef int32_t type; }; + +// Give address only if types are same (used below when conditionally copying data) +// last int/char arg is to resolve ambiguous overloads, just always pass 0 and the int version will be preferred +template +S* addressIfSame(T&, char) { + throw std::runtime_error("tried to take address for types that are not same"); + return nullptr;} +template +S* addressIfSame(S& t, int) {return &t;} + +// clang-format on +} // namespace + +/** + * @brief A generic property, which is associated with some element. Can be plain Property or a ListProperty, of some + * type. Generally, the user should not need to interact with these directly, but they are exposed in case someone + * wants to get clever. + */ +class Property { + +public: + /** + * @brief Create a new Property with the given name. + * + * @param name_ + */ + Property(const std::string& name_) : name(name_){}; + virtual ~Property(){}; + + std::string name; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. + */ + virtual void reserve(size_t capacity) = 0; + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. + */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) = 0; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) = 0; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. 
+ */ + virtual void readNextBigEndian(std::istream& stream) = 0; + + /** + * @brief (reading) Write a header entry for this property. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) = 0; + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() = 0; + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() = 0; +}; + +namespace { + +/** + * Check if the platform is little endian. + * (not foolproof, but will work on most platforms) + * + * @return true if little endian + */ +bool isLittleEndian() { + int32_t oneVal = 0x1; + char* numPtr = (char*)&oneVal; + return (numPtr[0] == 1); +} + +/** + * Swap endianness. + * + * @param value Value to swap. + * + * @return Swapped value. 
+ */ +template +T swapEndian(T val) { + char* bytes = reinterpret_cast(&val); + for (unsigned int i = 0; i < sizeof(val) / 2; i++) { + std::swap(bytes[sizeof(val) - 1 - i], bytes[i]); + } + return val; +} + + +// Unpack flattened list from the convention used in TypedListProperty +template +std::vector> unflattenList(const std::vector& flatList, const std::vector flatListStarts) { + size_t outerCount = flatListStarts.size() - 1; + + // Put the output here + std::vector> outLists(outerCount); + + if (outerCount == 0) { + return outLists; // quick out for empty + } + + // Copy each sublist + for (size_t iOuter = 0; iOuter < outerCount; iOuter++) { + size_t iFlatStart = flatListStarts[iOuter]; + size_t iFlatEnd = flatListStarts[iOuter + 1]; + outLists[iOuter].insert(outLists[iOuter].begin(), flatList.begin() + iFlatStart, flatList.begin() + iFlatEnd); + } + + return outLists; +} + + +}; // namespace + + +/** + * @brief A property which takes a single value (not a list). + */ +template +class TypedProperty : public Property { + +public: + /** + * @brief Create a new Property with the given name. + * + * @param name_ + */ + TypedProperty(const std::string& name_) : Property(name_) { + if (typeName() == "unknown") { + // TODO should really be a compile-time error + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + }; + + /** + * @brief Create a new property and initialize with data. + * + * @param name_ + * @param data_ + */ + TypedProperty(const std::string& name_, const std::vector& data_) : Property(name_), data(data_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + }; + + virtual ~TypedProperty() override{}; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. 
+ */ + virtual void reserve(size_t capacity) override { data.reserve(capacity); } + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. + */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) override { + data.emplace_back(); + std::istringstream iss(tokens[currEntry]); + typename SerializeType::type tmp; // usually the same type as T + iss >> tmp; + data.back() = tmp; + currEntry++; + }; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) override { + data.emplace_back(); + stream.read((char*)&data.back(), sizeof(T)); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNextBigEndian(std::istream& stream) override { + data.emplace_back(); + stream.read((char*)&data.back(), sizeof(T)); + data.back() = swapEndian(data.back()); + } + + /** + * @brief (reading) Write a header entry for this property. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) override { + outStream << "property " << typeName() << " " << name << "\n"; + } + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. 
+ */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) override { + outStream.precision(std::numeric_limits::max_digits10); + outStream << static_cast::type>(data[iElement]); // case is usually a no-op + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) override { + outStream.write((char*)&data[iElement], sizeof(T)); + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) override { + auto value = swapEndian(data[iElement]); + outStream.write((char*)&value, sizeof(T)); + } + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() override { return data.size(); } + + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() override { return typeName(); } + + /** + * @brief The actual data contained in the property + */ + std::vector data; +}; + + +/** + * @brief A property which is a list of value (eg, 3 doubles). Note that lists are always variable length per-element. + */ +template +class TypedListProperty : public Property { + +public: + /** + * @brief Create a new Property with the given name. 
+ * + * @param name_ + */ + TypedListProperty(const std::string& name_, int listCountBytes_) : Property(name_), listCountBytes(listCountBytes_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + + flattenedIndexStart.push_back(0); + }; + + /** + * @brief Create a new property and initialize with data + * + * @param name_ + * @param data_ + */ + TypedListProperty(const std::string& name_, const std::vector>& data_) : Property(name_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + + // Populate list with data + flattenedIndexStart.push_back(0); + for (const std::vector& vec : data_) { + for (const T& val : vec) { + flattenedData.emplace_back(val); + } + flattenedIndexStart.push_back(flattenedData.size()); + } + }; + + virtual ~TypedListProperty() override{}; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. + */ + virtual void reserve(size_t capacity) override { + flattenedData.reserve(3 * capacity); // optimize for triangle meshes + flattenedIndexStart.reserve(capacity + 1); + } + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. 
+ */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) override { + + std::istringstream iss(tokens[currEntry]); + size_t count; + iss >> count; + currEntry++; + + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + for (size_t iFlat = currSize; iFlat < afterSize; iFlat++) { + std::istringstream iss(tokens[currEntry]); + typename SerializeType::type tmp; // usually the same type as T + iss >> tmp; + flattenedData[iFlat] = tmp; + currEntry++; + } + flattenedIndexStart.emplace_back(afterSize); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) override { + + // Read the size of the list + size_t count = 0; + stream.read(((char*)&count), listCountBytes); + + // Read list elements + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + if (count > 0) { + stream.read((char*)&flattenedData[currSize], count * sizeof(T)); + } + flattenedIndexStart.emplace_back(afterSize); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. 
+ */ + virtual void readNextBigEndian(std::istream& stream) override { + + // Read the size of the list + size_t count = 0; + stream.read(((char*)&count), listCountBytes); + if (listCountBytes == 8) { + count = (size_t)swapEndian((uint64_t)count); + } else if (listCountBytes == 4) { + count = (size_t)swapEndian((uint32_t)count); + } else if (listCountBytes == 2) { + count = (size_t)swapEndian((uint16_t)count); + } + + // Read list elements + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + if (count > 0) { + stream.read((char*)&flattenedData[currSize], count * sizeof(T)); + } + flattenedIndexStart.emplace_back(afterSize); + + // Swap endian order of list elements + for (size_t iFlat = currSize; iFlat < afterSize; iFlat++) { + flattenedData[iFlat] = swapEndian(flattenedData[iFlat]); + } + } + + /** + * @brief (reading) Write a header entry for this property. Note that we already use "uchar" for the list count type. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) override { + // NOTE: We ALWAYS use uchar as the list count output type + outStream << "property list uchar " << typeName() << " " << name << "\n"; + } + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. 
See note in README."); + } + + outStream << dataCount; + outStream.precision(std::numeric_limits::max_digits10); + for (size_t iFlat = dataStart; iFlat < dataEnd; iFlat++) { + outStream << " " << static_cast::type>(flattenedData[iFlat]); // cast is usually a no-op + } + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. See note in README."); + } + uint8_t count = static_cast(dataCount); + + outStream.write((char*)&count, sizeof(uint8_t)); + outStream.write((char*)&flattenedData[dataStart], count * sizeof(T)); + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. 
See note in README."); + } + uint8_t count = static_cast(dataCount); + + outStream.write((char*)&count, sizeof(uint8_t)); + for (size_t iFlat = dataStart; iFlat < dataEnd; iFlat++) { + T value = swapEndian(flattenedData[iFlat]); + outStream.write((char*)&value, sizeof(T)); + } + } + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() override { return flattenedIndexStart.size() - 1; } + + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() override { return typeName(); } + + /** + * @brief The (flattened) data for the property, as formed by concatenating all of the individual element lists + * together. + */ + std::vector flattenedData; + + /** + * @brief Indices in to flattenedData. The i'th element gives the index in to flattenedData where the element's data + * begins. A final entry is included which is the length of flattenedData. Size is N_elem + 1. + */ + std::vector flattenedIndexStart; + + /** + * @brief The number of bytes used to store the count for lists of data. + */ + int listCountBytes = -1; +}; + + +/** + * @brief Helper function to construct a new property of the appropriate type. + * + * @param name The name of the property to construct. + * @param typeStr A string naming the type according to the format. + * @param isList Is this a plain property, or a list property? + * @param listCountTypeStr If a list property, the type of the count varible. + * + * @return A new Property with the proper type. 
+ */ +inline std::unique_ptr createPropertyWithType(const std::string& name, const std::string& typeStr, + bool isList, const std::string& listCountTypeStr) { + + // == Figure out how many bytes the list count field has, if this is a list type + // Note: some files seem to use signed types here, we read the width but always parse as if unsigned + int listCountBytes = -1; + if (isList) { + if (listCountTypeStr == "uchar" || listCountTypeStr == "uint8" || listCountTypeStr == "char" || + listCountTypeStr == "int8") { + listCountBytes = 1; + } else if (listCountTypeStr == "ushort" || listCountTypeStr == "uint16" || listCountTypeStr == "short" || + listCountTypeStr == "int16") { + listCountBytes = 2; + } else if (listCountTypeStr == "uint" || listCountTypeStr == "uint32" || listCountTypeStr == "int" || + listCountTypeStr == "int32") { + listCountBytes = 4; + } else { + throw std::runtime_error("Unrecognized list count type: " + listCountTypeStr); + } + } + + // = Unsigned int + + // 8 bit unsigned + if (typeStr == "uchar" || typeStr == "uint8") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 16 bit unsigned + else if (typeStr == "ushort" || typeStr == "uint16") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 32 bit unsigned + else if (typeStr == "uint" || typeStr == "uint32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // = Signed int + + // 8 bit signed + if (typeStr == "char" || typeStr == "int8") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 16 bit signed + else if (typeStr == "short" || typeStr == 
"int16") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 32 bit signed + else if (typeStr == "int" || typeStr == "int32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // = Float + + // 32 bit float + else if (typeStr == "float" || typeStr == "float32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 64 bit float + else if (typeStr == "double" || typeStr == "float64") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + else { + throw std::runtime_error("Data type: " + typeStr + " cannot be mapped to .ply format"); + } +} + +/** + * @brief An element (more properly an element type) in the .ply object. Tracks the name of the elemnt type (eg, + * "vertices"), the number of elements of that type (eg, 1244), and any properties associated with that element (eg, + * "position", "color"). + */ +class Element { + +public: + /** + * @brief Create a new element type. + * + * @param name_ Name of the element type (eg, "vertices") + * @param count_ Number of instances of this element. + */ + Element(const std::string& name_, size_t count_) : name(name_), count(count_) {} + + std::string name; + size_t count; + std::vector> properties; + + /** + * @brief Check if a property exists. + * + * @param target The name of the property to get. + * + * @return Whether the target property exists. + */ + bool hasProperty(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + return true; + } + } + return false; + } + + /** + * @brief Check if a property exists with the requested type. 
+ * + * @tparam T The type of the property + * @param target The name of the property to get. + * + * @return Whether the target property exists. + */ + template + bool hasPropertyType(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + TypedProperty* castedProp = dynamic_cast*>(prop.get()); + if (castedProp) { + return true; + } + return false; + } + } + return false; + } + + /** + * @brief A list of the names of all properties + * + * @return Property names + */ + std::vector getPropertyNames() { + std::vector names; + for (std::unique_ptr& p : properties) { + names.push_back(p->name); + } + return names; + } + + /** + * @brief Low-level method to get a pointer to a property. Users probably don't need to call this. + * + * @param target The name of the property to get. + * + * @return A (unique_ptr) pointer to the property. + */ + std::unique_ptr& getPropertyPtr(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + return prop; + } + } + throw std::runtime_error("PLY parser: element " + name + " does not have property " + target); + } + + /** + * @brief Add a new (plain, not list) property for this element type. + * + * @tparam T The type of the property + * @param propertyName The name of the property + * @param data The data for the property. Must have the same length as the number of elements. + */ + template + void addProperty(const std::string& propertyName, const std::vector& data) { + + if (data.size() != count) { + throw std::runtime_error("PLY write: new property " + propertyName + " has size which does not match element"); + } + + // If there is already some property with this name, remove it + for (size_t i = 0; i < properties.size(); i++) { + if (properties[i]->name == propertyName) { + properties.erase(properties.begin() + i); + i--; + } + } + + // Copy to canonical type. Often a no-op, but takes care of standardizing widths across platforms. 
+ std::vector::type> canonicalVec(data.begin(), data.end()); + + properties.push_back( + std::unique_ptr(new TypedProperty::type>(propertyName, canonicalVec))); + } + + /** + * @brief Add a new list property for this element type. + * + * @tparam T The type of the property (eg, "double" for a list of doubles) + * @param propertyName The name of the property + * @param data The data for the property. Outer vector must have the same length as the number of elements. + */ + template + void addListProperty(const std::string& propertyName, const std::vector>& data) { + + if (data.size() != count) { + throw std::runtime_error("PLY write: new property " + propertyName + " has size which does not match element"); + } + + // If there is already some property with this name, remove it + for (size_t i = 0; i < properties.size(); i++) { + if (properties[i]->name == propertyName) { + properties.erase(properties.begin() + i); + i--; + } + } + + // Copy to canonical type. Often a no-op, but takes care of standardizing widths across platforms. + std::vector::type>> canonicalListVec; + for (const std::vector& subList : data) { + canonicalListVec.emplace_back(subList.begin(), subList.end()); + } + + properties.push_back(std::unique_ptr( + new TypedListProperty::type>(propertyName, canonicalListVec))); + } + + /** + * @brief Get a vector of a data from a property for this element. Automatically promotes to larger types. Throws if + * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector getProperty(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + return getDataFromPropertyRecursive(prop.get()); + } + + /** + * @brief Get a vector of a data from a property for this element. 
Unlike getProperty(), only returns if the ply + * record contains a type that matches T exactly. Throws if * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector getPropertyType(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + TypedProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + return castedProp->data; + } + + // No match, failure + throw std::runtime_error("PLY parser: property " + prop->name + " is not of type type " + typeName() + + ". Has type " + prop->propertyTypeName()); + } + + /** + * @brief Get a vector of lists of data from a property for this element. Automatically promotes to larger types. + * Throws if requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector> getListProperty(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + return getDataFromListPropertyRecursive(prop.get()); + } + + /** + * @brief Get a vector of a data from a property for this element. Unlike getProperty(), only returns if the ply + * record contains a type that matches T exactly. Throws if * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. 
+ */ + template + std::vector> getListPropertyType(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + TypedListProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + return unflattenList(castedProp->flattenedData, castedProp->flattenedIndexStart); + } + + // No match, failure + throw std::runtime_error("PLY parser: list property " + prop->name + " is not of type " + typeName() + + ". Has type " + prop->propertyTypeName()); + } + + + /** + * @brief Get a vector of lists of data from a property for this element. Automatically promotes to larger types. + * Unlike getListProperty(), this method will additionally convert between types of different sign (eg, requesting and + * int32 would get data from a uint32); doing so naively converts between signed and unsigned types. This is typically + * useful for data representing indices, which might be stored as signed or unsigned numbers. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. 
+ */ + template + std::vector> getListPropertyAnySign(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + try { + // First, try the usual approach, looking for a version of the property with the same signed-ness and possibly + // smaller size + return getDataFromListPropertyRecursive(prop.get()); + } catch (const std::runtime_error& orig_e) { + + // If the usual approach fails, look for a version with opposite signed-ness + try { + + // This type has the oppopsite signeness as the input type + typedef typename CanonicalName::type Tcan; + typedef typename std::conditional::value, typename std::make_unsigned::type, + typename std::make_signed::type>::type OppsignType; + + return getDataFromListPropertyRecursive(prop.get()); + + } catch (const std::runtime_error&) { + throw orig_e; + } + + throw orig_e; + } + } + + + /** + * @brief Performs sanity checks on the element, throwing if any fail. + */ + void validate() { + + // Make sure no properties have duplicate names, and no names have whitespace + for (size_t iP = 0; iP < properties.size(); iP++) { + for (char c : properties[iP]->name) { + if (std::isspace(c)) { + throw std::runtime_error("Ply validate: illegal whitespace in name " + properties[iP]->name); + } + } + for (size_t jP = iP + 1; jP < properties.size(); jP++) { + if (properties[iP]->name == properties[jP]->name) { + throw std::runtime_error("Ply validate: multiple properties with name " + properties[iP]->name); + } + } + } + + // Make sure all properties have right length + for (size_t iP = 0; iP < properties.size(); iP++) { + if (properties[iP]->size() != count) { + throw std::runtime_error("Ply validate: property has wrong size. " + properties[iP]->name + + " does not match element size."); + } + } + } + + /** + * @brief Writes out this element's information to the file header. + * + * @param outStream The stream to use. 
+ */ + void writeHeader(std::ostream& outStream) { + + outStream << "element " << name << " " << count << "\n"; + + for (std::unique_ptr& p : properties) { + p->writeHeader(outStream); + } + } + + /** + * @brief (ASCII writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataASCII(std::ostream& outStream) { + // Question: what is the proper output for an element with no properties? Here, we write a blank line, so there is + // one line per element no matter what. + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataASCII(outStream, iE); + if (iP < properties.size() - 1) { + outStream << " "; + } + } + outStream << "\n"; + } + } + + + /** + * @brief (binary writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataBinary(std::ostream& outStream) { + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataBinary(outStream, iE); + } + } + } + + + /** + * @brief (binary writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataBinaryBigEndian(std::ostream& outStream) { + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataBinaryBigEndian(outStream, iE); + } + } + } + + + /** + * @brief Helper function which does the hard work to implement type promotion for data getters. Throws if type + * conversion fails. 
+ * + * @tparam D The desired output type + * @tparam T The current attempt for the actual type of the property + * @param prop The property to get (does not delete nor share pointer) + * + * @return The data, with the requested type + */ + template + std::vector getDataFromPropertyRecursive(Property* prop) { + + typedef typename CanonicalName::type Tcan; + + { // Try to return data of type D from a property of type T + TypedProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + // Succeeded, return a buffer of the data (copy while converting type) + std::vector castedVec; + castedVec.reserve(castedProp->data.size()); + for (Tcan& v : castedProp->data) { + castedVec.push_back(static_cast(v)); + } + return castedVec; + } + } + + TypeChain chainType; + if (chainType.hasChildType) { + return getDataFromPropertyRecursive::type>(prop); + } else { + // No smaller type to try, failure + throw std::runtime_error("PLY parser: property " + prop->name + " cannot be coerced to requested type " + + typeName() + ". Has type " + prop->propertyTypeName()); + } + } + + + /** + * @brief Helper function which does the hard work to implement type promotion for list data getters. Throws if type + * conversion fails. 
+ * + * @tparam D The desired output type + * @tparam T The current attempt for the actual type of the property + * @param prop The property to get (does not delete nor share pointer) + * + * @return The data, with the requested type + */ + template + std::vector> getDataFromListPropertyRecursive(Property* prop) { + typedef typename CanonicalName::type Tcan; + + TypedListProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + // Succeeded, return a buffer of the data (copy while converting type) + + // Convert to flat buffer of new type + std::vector* castedFlatVec = nullptr; + std::vector castedFlatVecCopy; // we _might_ make a copy here, depending on is_same below + + if (std::is_same, std::vector>::value) { + // just use the array we already have + castedFlatVec = addressIfSame>(castedProp->flattenedData, 0 /* dummy arg to disambiguate */); + } else { + // make a copy + castedFlatVecCopy.reserve(castedProp->flattenedData.size()); + for (Tcan& v : castedProp->flattenedData) { + castedFlatVecCopy.push_back(static_cast(v)); + } + castedFlatVec = &castedFlatVecCopy; + } + + // Unflatten and return + return unflattenList(*castedFlatVec, castedProp->flattenedIndexStart); + } + + TypeChain chainType; + if (chainType.hasChildType) { + return getDataFromListPropertyRecursive::type>(prop); + } else { + // No smaller type to try, failure + throw std::runtime_error("PLY parser: list property " + prop->name + + " cannot be coerced to requested type list " + typeName() + ". 
Has type list " + + prop->propertyTypeName()); + } + } +}; + + +// Some string helpers +namespace { + +inline std::string trimSpaces(const std::string& input) { + size_t start = 0; + while (start < input.size() && input[start] == ' ') start++; + size_t end = input.size(); + while (end > start && (input[end - 1] == ' ' || input[end - 1] == '\n' || input[end - 1] == '\r')) end--; + return input.substr(start, end - start); +} + +inline std::vector tokenSplit(const std::string& input) { + std::vector result; + size_t curr = 0; + size_t found = 0; + while ((found = input.find_first_of(' ', curr)) != std::string::npos) { + std::string token = input.substr(curr, found - curr); + token = trimSpaces(token); + if (token.size() > 0) { + result.push_back(token); + } + curr = found + 1; + } + std::string token = input.substr(curr); + token = trimSpaces(token); + if (token.size() > 0) { + result.push_back(token); + } + + return result; +} + +inline bool startsWith(const std::string& input, const std::string& query) { + return input.compare(0, query.length(), query) == 0; +} +}; // namespace + + +/** + * @brief Primary class; represents a set of data in the .ply format. + */ +class PLYData { + +public: + /** + * @brief Create an empty PLYData object. + */ + PLYData(){}; + + /** + * @brief Initialize a PLYData by reading from a file. Throws if any failures occur. + * + * @param filename The file to read from. + * @param verbose If true, print useful info about the file to stdout + */ + PLYData(const std::string& filename, bool verbose = false) { + + using std::cout; + using std::endl; + using std::string; + using std::vector; + + if (verbose) cout << "PLY parser: Reading ply file: " << filename << endl; + + // Open a file in binary always, in case it turns out to have binary data. 
+ std::ifstream inStream(filename, std::ios::binary); + if (inStream.fail()) { + throw std::runtime_error("PLY parser: Could not open file " + filename); + } + + parsePLY(inStream, verbose); + + if (verbose) { + cout << " - Finished parsing file." << endl; + } + } + + /** + * @brief Initialize a PLYData by reading from a stringstream. Throws if any failures occur. + * + * @param inStream The stringstream to read from. + * @param verbose If true, print useful info about the file to stdout + */ + PLYData(std::istream& inStream, bool verbose = false) { + + using std::cout; + using std::endl; + + if (verbose) cout << "PLY parser: Reading ply file from stream" << endl; + + parsePLY(inStream, verbose); + + if (verbose) { + cout << " - Finished parsing stream." << endl; + } + } + + /** + * @brief Perform sanity checks on the file, throwing if any fail. + */ + void validate() { + + for (size_t iE = 0; iE < elements.size(); iE++) { + for (char c : elements[iE].name) { + if (std::isspace(c)) { + throw std::runtime_error("Ply validate: illegal whitespace in element name " + elements[iE].name); + } + } + for (size_t jE = iE + 1; jE < elements.size(); jE++) { + if (elements[iE].name == elements[jE].name) { + throw std::runtime_error("Ply validate: duplcate element name " + elements[iE].name); + } + } + } + + // Do a quick validation sanity check + for (Element& e : elements) { + e.validate(); + } + } + + /** + * @brief Write this data to a .ply file. + * + * @param filename The file to write to. + * @param format The format to use (binary or ascii?) 
+ */ + void write(const std::string& filename, DataFormat format = DataFormat::ASCII) { + outputDataFormat = format; + + validate(); + + // Open stream for writing + std::ofstream outStream(filename, std::ios::out | std::ios::binary); + if (!outStream.good()) { + throw std::runtime_error("Ply writer: Could not open output file " + filename + " for writing"); + } + + writePLY(outStream); + } + + /** + * @brief Write this data to an output stream + * + * @param outStream The output stream to write to. + * @param format The format to use (binary or ascii?) + */ + void write(std::ostream& outStream, DataFormat format = DataFormat::ASCII) { + outputDataFormat = format; + + validate(); + + writePLY(outStream); + } + + /** + * @brief Get an element type by name ("vertices") + * + * @param target The name of the element type to get + * + * @return A reference to the element type. + */ + Element& getElement(const std::string& target) { + for (Element& e : elements) { + if (e.name == target) return e; + } + throw std::runtime_error("PLY parser: no element with name: " + target); + } + + + /** + * @brief Check if an element type exists + * + * @param target The name to check for. + * + * @return True if exists. + */ + bool hasElement(const std::string& target) { + for (Element& e : elements) { + if (e.name == target) return true; + } + return false; + } + + + /** + * @brief A list of the names of all elements + * + * @return Element names + */ + std::vector getElementNames() { + std::vector names; + for (Element& e : elements) { + names.push_back(e.name); + } + return names; + } + + + /** + * @brief Add a new element type to the object + * + * @param name The name of the new element type ("vertices"). + * @param count The number of elements of this type. 
+ */ + void addElement(const std::string& name, size_t count) { elements.emplace_back(name, count); } + + // === Common-case helpers + + + /** + * @brief Common-case helper get mesh vertex positions + * + * @param vertexElementName The element name to use (default: "vertex") + * + * @return A vector of vertex positions. + */ + std::vector> getVertexPositions(const std::string& vertexElementName = "vertex") { + + std::vector xPos = getElement(vertexElementName).getProperty("x"); + std::vector yPos = getElement(vertexElementName).getProperty("y"); + std::vector zPos = getElement(vertexElementName).getProperty("z"); + + std::vector> result(xPos.size()); + for (size_t i = 0; i < result.size(); i++) { + result[i][0] = xPos[i]; + result[i][1] = yPos[i]; + result[i][2] = zPos[i]; + } + + return result; + } + + /** + * @brief Common-case helper get mesh vertex colors + * + * @param vertexElementName The element name to use (default: "vertex") + * + * @return A vector of vertex colors (unsigned chars [0,255]). + */ + std::vector> getVertexColors(const std::string& vertexElementName = "vertex") { + + std::vector r = getElement(vertexElementName).getProperty("red"); + std::vector g = getElement(vertexElementName).getProperty("green"); + std::vector b = getElement(vertexElementName).getProperty("blue"); + + std::vector> result(r.size()); + for (size_t i = 0; i < result.size(); i++) { + result[i][0] = r[i]; + result[i][1] = g[i]; + result[i][2] = b[i]; + } + + return result; + } + + /** + * @brief Common-case helper to get face indices for a mesh. If not template type is given, size_t is used. Naively + * converts to requested signedness, which may lead to unexpected values if an unsigned type is used and file contains + * negative values. + * + * @return The indices into the vertex elements for each face. Usually 0-based, though there are no formal rules. 
+ */ + template + std::vector> getFaceIndices() { + + for (const std::string& f : std::vector{"face"}) { + for (const std::string& p : std::vector{"vertex_indices", "vertex_index"}) { + try { + return getElement(f).getListPropertyAnySign(p); + } catch (const std::runtime_error&) { + // that's fine + } + } + } + throw std::runtime_error("PLY parser: could not find face vertex indices attribute under any common name."); + } + + + /** + * @brief Common-case helper set mesh vertex positons. Creates vertex element, if necessary. + * + * @param vertexPositions A vector of vertex positions + */ + void addVertexPositions(std::vector>& vertexPositions) { + + std::string vertexName = "vertex"; + size_t N = vertexPositions.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + // De-interleave + std::vector xPos(N); + std::vector yPos(N); + std::vector zPos(N); + for (size_t i = 0; i < vertexPositions.size(); i++) { + xPos[i] = vertexPositions[i][0]; + yPos[i] = vertexPositions[i][1]; + zPos[i] = vertexPositions[i][2]; + } + + // Store + getElement(vertexName).addProperty("x", xPos); + getElement(vertexName).addProperty("y", yPos); + getElement(vertexName).addProperty("z", zPos); + } + + /** + * @brief Common-case helper set mesh vertex colors. Creates a vertex element, if necessary. + * + * @param colors A vector of vertex colors (unsigned chars [0,255]). 
+ */ + void addVertexColors(std::vector>& colors) { + + std::string vertexName = "vertex"; + size_t N = colors.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + // De-interleave + std::vector r(N); + std::vector g(N); + std::vector b(N); + for (size_t i = 0; i < colors.size(); i++) { + r[i] = colors[i][0]; + g[i] = colors[i][1]; + b[i] = colors[i][2]; + } + + // Store + getElement(vertexName).addProperty("red", r); + getElement(vertexName).addProperty("green", g); + getElement(vertexName).addProperty("blue", b); + } + + /** + * @brief Common-case helper set mesh vertex colors. Creates a vertex element, if necessary. + * + * @param colors A vector of vertex colors as floating point [0,1] values. Internally converted to [0,255] chars. + */ + void addVertexColors(std::vector>& colors) { + + std::string vertexName = "vertex"; + size_t N = colors.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + auto toChar = [](double v) { + if (v < 0.0) v = 0.0; + if (v > 1.0) v = 1.0; + return static_cast(v * 255.); + }; + + // De-interleave + std::vector r(N); + std::vector g(N); + std::vector b(N); + for (size_t i = 0; i < colors.size(); i++) { + r[i] = toChar(colors[i][0]); + g[i] = toChar(colors[i][1]); + b[i] = toChar(colors[i][2]); + } + + // Store + getElement(vertexName).addProperty("red", r); + getElement(vertexName).addProperty("green", g); + getElement(vertexName).addProperty("blue", b); + } + + + /** + * @brief Common-case helper to set face indices. Creates a face element if needed. The input type will be casted to a + * 32 bit integer of the same signedness. + * + * @param indices The indices into the vertex list around each face. 
+ */ + template + void addFaceIndices(std::vector>& indices) { + + std::string faceName = "face"; + size_t N = indices.size(); + + // Create the element + if (!hasElement(faceName)) { + addElement(faceName, N); + } + + // Cast to 32 bit + typedef typename std::conditional::value, int32_t, uint32_t>::type IndType; + std::vector> intInds; + for (std::vector& l : indices) { + std::vector thisInds; + for (T& val : l) { + IndType valConverted = static_cast(val); + if (valConverted != val) { + throw std::runtime_error("Index value " + std::to_string(val) + + " could not be converted to a .ply integer without loss of data. Note that .ply " + "only supports 32-bit ints."); + } + thisInds.push_back(valConverted); + } + intInds.push_back(thisInds); + } + + // Store + getElement(faceName).addListProperty("vertex_indices", intInds); + } + + + /** + * @brief Comments for the file. When writing, each entry will be written as a sequential comment line. + */ + std::vector comments; + + + /** + * @brief obj_info comments for the file. When writing, each entry will be written as a sequential comment line. 
+ */ + std::vector objInfoComments; + +private: + std::vector elements; + const int majorVersion = 1; // I'll buy you a drink if these ever get bumped + const int minorVersion = 0; + + DataFormat inputDataFormat = DataFormat::ASCII; // set when reading from a file + DataFormat outputDataFormat = DataFormat::ASCII; // option for writing files + + + // === Reading === + + /** + * @brief Parse a PLY file from an input stream + * + * @param inStream + * @param verbose + */ + void parsePLY(std::istream& inStream, bool verbose) { + + // == Process the header + parseHeader(inStream, verbose); + + + // === Parse data from a binary file + if (inputDataFormat == DataFormat::Binary) { + parseBinary(inStream, verbose); + } + // === Parse data from an binary file + else if (inputDataFormat == DataFormat::BinaryBigEndian) { + parseBinaryBigEndian(inStream, verbose); + } + // === Parse data from an ASCII file + else if (inputDataFormat == DataFormat::ASCII) { + parseASCII(inStream, verbose); + } + } + + /** + * @brief Read the header for a file + * + * @param inStream + * @param verbose + */ + void parseHeader(std::istream& inStream, bool verbose) { + + using std::cout; + using std::endl; + using std::string; + using std::vector; + + // First two lines are predetermined + { // First line is magic constant + string plyLine; + std::getline(inStream, plyLine); + if (trimSpaces(plyLine) != "ply") { + throw std::runtime_error("PLY parser: File does not appear to be ply file. 
First line should be 'ply'"); + } + } + + { // second line is version + string styleLine; + std::getline(inStream, styleLine); + vector tokens = tokenSplit(styleLine); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: bad format line"); + std::string formatStr = tokens[0]; + std::string typeStr = tokens[1]; + std::string versionStr = tokens[2]; + + // "format" + if (formatStr != "format") throw std::runtime_error("PLY parser: bad format line"); + + // ascii/binary + if (typeStr == "ascii") { + inputDataFormat = DataFormat::ASCII; + if (verbose) cout << " - Type: ascii" << endl; + } else if (typeStr == "binary_little_endian") { + inputDataFormat = DataFormat::Binary; + if (verbose) cout << " - Type: binary" << endl; + } else if (typeStr == "binary_big_endian") { + inputDataFormat = DataFormat::BinaryBigEndian; + if (verbose) cout << " - Type: binary big endian" << endl; + } else { + throw std::runtime_error("PLY parser: bad format line"); + } + + // version + if (versionStr != "1.0") { + throw std::runtime_error("PLY parser: encountered file with version != 1.0. 
Don't know how to parse that"); + } + if (verbose) cout << " - Version: " << versionStr << endl; + } + + // Consume header line by line + while (inStream.good()) { + string line; + std::getline(inStream, line); + + // Parse a comment + if (startsWith(line, "comment")) { + string comment = line.substr(8); + if (verbose) cout << " - Comment: " << comment << endl; + comments.push_back(comment); + continue; + } + + // Parse an obj_info comment + if (startsWith(line, "obj_info")) { + string infoComment = line.substr(9); + if (verbose) cout << " - obj_info: " << infoComment << endl; + objInfoComments.push_back(infoComment); + continue; + } + + // Parse an element + else if (startsWith(line, "element")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: Invalid element line"); + string name = tokens[1]; + size_t count; + std::istringstream iss(tokens[2]); + iss >> count; + elements.emplace_back(name, count); + if (verbose) cout << " - Found element: " << name << " (count = " << count << ")" << endl; + continue; + } + + // Parse a property list + else if (startsWith(line, "property list")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 5) throw std::runtime_error("PLY parser: Invalid property list line"); + if (elements.size() == 0) throw std::runtime_error("PLY parser: Found property list without previous element"); + string countType = tokens[2]; + string type = tokens[3]; + string name = tokens[4]; + elements.back().properties.push_back(createPropertyWithType(name, type, true, countType)); + if (verbose) + cout << " - Found list property: " << name << " (count type = " << countType << ", data type = " << type + << ")" << endl; + continue; + } + + // Parse a property + else if (startsWith(line, "property")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: Invalid property line"); + if (elements.size() == 0) throw std::runtime_error("PLY parser: Found 
property without previous element"); + string type = tokens[1]; + string name = tokens[2]; + elements.back().properties.push_back(createPropertyWithType(name, type, false, "")); + if (verbose) cout << " - Found property: " << name << " (type = " << type << ")" << endl; + continue; + } + + // Parse end of header + else if (startsWith(line, "end_header")) { + break; + } + + // Error! + else { + throw std::runtime_error("Unrecognized header line: " + line); + } + } + } + + /** + * @brief Read the actual data for a file, in ASCII + * + * @param inStream + * @param verbose + */ + void parseASCII(std::istream& inStream, bool verbose) { + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + + string line; + std::getline(inStream, line); + + // Some .ply files seem to include empty lines before the start of property data (though this is not specified + // in the format description). We attempt to recover and parse such files by skipping any empty lines. + if (!elem.properties.empty()) { // if the element has no properties, the line _should_ be blank, presumably + while (line.empty()) { // skip lines until we hit something nonempty + std::getline(inStream, line); + } + } + + vector tokens = tokenSplit(line); + size_t iTok = 0; + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->parseNext(tokens, iTok); + } + } + } + } + + /** + * @brief Read the actual data for a file, in binary. 
+ * + * @param inStream + * @param verbose + */ + void parseBinary(std::istream& inStream, bool verbose) { + + if (!isLittleEndian()) { + throw std::runtime_error("binary reading assumes little endian system"); + } + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->readNext(inStream); + } + } + } + } + + /** + * @brief Read the actual data for a file, in binary. + * + * @param inStream + * @param verbose + */ + void parseBinaryBigEndian(std::istream& inStream, bool verbose) { + + if (!isLittleEndian()) { + throw std::runtime_error("binary reading assumes little endian system"); + } + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->readNextBigEndian(inStream); + } + } + } + } + + // === Writing === + + + /** + * @brief write a PLY file to an output stream + * + * @param outStream + */ + void writePLY(std::ostream& outStream) { + + writeHeader(outStream); + + // Write all elements + for (Element& e : elements) { + if (outputDataFormat == DataFormat::Binary) { + if (!isLittleEndian()) { + throw std::runtime_error("binary writing assumes little endian system"); + } + e.writeDataBinary(outStream); + } else if (outputDataFormat == DataFormat::BinaryBigEndian) { + if (!isLittleEndian()) { + throw 
std::runtime_error("binary writing assumes little endian system"); + } + e.writeDataBinaryBigEndian(outStream); + } else if (outputDataFormat == DataFormat::ASCII) { + e.writeDataASCII(outStream); + } + } + } + + + /** + * @brief Write out a header for a file + * + * @param outStream + */ + void writeHeader(std::ostream& outStream) { + + // Magic line + outStream << "ply\n"; + + // Type line + outStream << "format "; + if (outputDataFormat == DataFormat::Binary) { + outStream << "binary_little_endian "; + } else if (outputDataFormat == DataFormat::BinaryBigEndian) { + outStream << "binary_big_endian "; + } else if (outputDataFormat == DataFormat::ASCII) { + outStream << "ascii "; + } + + // Version number + outStream << majorVersion << "." << minorVersion << "\n"; + + // Write comments + bool hasHapplyComment = false; + std::string happlyComment = "Written with hapPLY (https://github.com/nmwsharp/happly)"; + for (const std::string& comment : comments) { + if (comment == happlyComment) hasHapplyComment = true; + outStream << "comment " << comment << "\n"; + } + if (!hasHapplyComment) { + outStream << "comment " << happlyComment << "\n"; + } + + // Write obj_info comments + for (const std::string& comment : objInfoComments) { + outStream << "obj_info " << comment << "\n"; + } + + // Write elements (and their properties) + for (Element& e : elements) { + e.writeHeader(outStream); + } + + // End header + outStream << "end_header\n"; + } +}; + +} // namespace happly diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 929f4302..15ca4a8d 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -20,7 +20,7 @@ The following environmental variables should be set up (please adjust them for your particular configuration): export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/astrobee/build + export ASTROBEE_BUILD_PATH=$HOME/astrobee export ISAAC_WS=$HOME/isaac ## Robot 
sensors @@ -110,7 +110,8 @@ To compile Voxblox, clone https://github.com/ethz-asl/voxblox by the introduction of a small tool named batch_tsdf.cc that reads the clouds to fuse and the transforms from disk and writes the output mesh back to disk, instead -of using ROS. +of using ROS. It also can take into account a point's reliability when +fusing the clouds, which is computed by the geometry mapper. Compile it using the instructions at: @@ -645,19 +646,21 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, --camera_type all \ --start 0 \ --duration 1e+10 \ - --sampling_spacing_seconds 2 \ + --sampling_spacing_seconds 5 \ --dist_between_processed_cams 0.1 \ --depth_exclude_columns 0 \ --depth_exclude_rows 0 \ --foreshortening_delta 5.0 \ --median_filters "7 0.1 25 0.1" \ + --reliability_weight_exponent 2 \ + --voxblox_integrator merged \ --depth_hole_fill_diameter 30.0 \ - --max_ray_length 2 \ + --max_ray_length 2.5 \ --voxel_size 0.01 \ --max_iso_times_exposure 5.1 \ - --smoothing_time 1e-5 \ - --max_num_hole_edges 4000 \ - --max_hole_diameter 0.8 \ + --smoothing_time 5e-6 \ + --max_num_hole_edges 8000 \ + --max_hole_diameter 1.8 \ --num_min_faces_in_component 100 \ --num_components_to_keep 100 \ --edge_keep_ratio 0.2 \ @@ -703,9 +706,16 @@ Parameters: are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (see --max_hole_diameter). The default is 30. + --reliability_weight_exponent: A larger value will give more + weight to depth points corresponding to pixels closer to depth + image center, which are considered more reliable. The default is + 2.0. --max_ray_length_m: Dictates at what distance from the depth camera to truncate the rays (in meters). This can be small if the images are acquired close to walls and facing them. + --voxblox_integrator: When fusing the depth point clouds use + this VoxBlox method. Options are: "merged", "simple", and + "fast". The default is "merged". 
--voxel_size is the grid size used for binning the points, in meters. --max_iso_times_exposure: Apply the inverse gamma transform to images, multiply them by max_iso_times_exposure/ISO/exposure_time @@ -1121,7 +1131,7 @@ are added back. ### Map building and registration -Build a sparse map with these images. Use the same environemnt as +Build a sparse map with these images. Use the same environment as above: dir=nav_images @@ -1333,7 +1343,7 @@ A geometry mapper run directory has all the inputs this tool needs. It can be run as follows: export ASTROBEE_SOURCE_PATH=$HOME/projects/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee/build + export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee export ISAAC_WS=$HOME/projects/isaac source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 0d375bfe..5dd504f3 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -1392,7 +1392,7 @@ void calc_median_residuals(std::vector const& residuals, double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - std::cout << "nav haz nav time bracket " + std::cout << "nav_start haz nav_end times " << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl; std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; @@ -1441,7 +1441,7 @@ void calc_median_residuals(std::vector const& residuals, - haz_cam_start_time; double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - std::cout << "nav sci nav time bracket " + std::cout << "nav_start sci nav_end times " << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc 
b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 395854e3..ca56c9a4 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -51,7 +51,7 @@ #include #include -// #include // for reading and writing ply files +#include // for reading and writing ply files #include #include @@ -251,15 +251,18 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const return false; } -typedef cv::Vec Vec4d; +// Store x, y, z, intensity, and weight. +typedef cv::Vec Vec5d; // Check if first three coordinates of a vector are 0 -bool zeroXyz(Vec4d const& p) { return (p[0] == 0) && (p[1] == 0) && (p[2] == 0); } +bool zeroXyz(Vec5d const& p) { return (p[0] == 0) && (p[1] == 0) && (p[2] == 0); } // Add a given vertex to the ply file unless already present -inline void add_vertex(Vec4d const& V, std::pair const& pix, - std::map, size_t>& pix_to_vertex, size_t& vertex_count, - std::vector>& vertices, std::vector>& colors) { +inline void add_vertex(Vec5d const& V, std::pair const& pix, // NOLINT + std::map, size_t>& pix_to_vertex, // NOLINT + size_t& vertex_count, // NOLINT + std::vector>& vertices, // NOLINT + std::vector>& colors) { // NOLINT // Do not add the zero vertex if (zeroXyz(V)) return; @@ -276,15 +279,17 @@ inline void add_vertex(Vec4d const& V, std::pair const& pix, vertex_count++; } -double seg_len(Vec4d const& A, Vec4d const& B) { return Eigen::Vector3d(A[0] - B[0], A[1] - B[1], A[2] - B[2]).norm(); } +double seg_len(Vec5d const& A, Vec5d const& B) { + return Eigen::Vector3d(A[0] - B[0], A[1] - B[1], A[2] - B[2]).norm(); // NOLINT +} -void add_dist(Vec4d const& A, Vec4d const& B, std::vector& distances) { +void add_dist(Vec5d const& A, Vec5d const& B, std::vector& distances) { if (!zeroXyz(A) && !zeroXyz(B)) distances.push_back(seg_len(A, B)); } // If the distance between two points is more than this len, // add more points in between on the edge connecting them. 
-void add_points(Vec4d const& A, Vec4d const& B, double min_len, std::vector& points) { +void add_points(Vec5d const& A, Vec5d const& B, double min_len, std::vector& points) { if (zeroXyz(A) || zeroXyz(B)) return; double curr_len = seg_len(A, B); @@ -293,11 +298,13 @@ void add_points(Vec4d const& A, Vec4d const& B, double min_len, std::vector(i) / static_cast(num); - Vec4d C = (1.0 - t) * A + t * B; + Vec5d C = (1.0 - t) * A + t * B; points.push_back(C); } } @@ -313,10 +320,10 @@ double median_mesh_edge(cv::Mat const& depthMat) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); add_dist(UL, UR, distances); add_dist(UL, LL, distances); @@ -338,13 +345,13 @@ double median_mesh_edge(cv::Mat const& depthMat) { // Add points on edges and inside triangles having distances longer // than the median edge length times a factor. Store the combined set // in depthMat by adding further rows to it. 
-void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { +void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { double median_len = median_mesh_edge(depthMat); // Add points if lengths are more than this double min_len = 1.3 * median_len; - std::vector points; + std::vector points; for (int row = 0; row < depthMat.rows - 1; row++) { for (int col = 0; col < depthMat.cols - 1; col++) { @@ -352,13 +359,13 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); // Add points on each edge - std::vector points1; + std::vector points1; add_points(UL, UR, min_len, points1); add_points(UL, LL, min_len, points1); add_points(UR, LL, min_len, points1); @@ -369,7 +376,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { // Valid face // See if to add the triangle center - Vec4d C = (UL + UR + LL) / 3.0; + Vec5d C = (UL + UR + LL) / 3.0; if (seg_len(UL, C) > min_len || seg_len(UR, C) > min_len || seg_len(LL, C) > min_len) points1.push_back(C); // Add points from triangle center to each point added so far @@ -377,7 +384,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { } // Add points on each edge of this triangle - std::vector points2; + std::vector points2; add_points(UR, LR, min_len, points2); add_points(UR, LL, min_len, points2); add_points(LR, LL, min_len, points2); @@ 
-387,7 +394,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { // See if to add the triangle center int curr_len2 = points2.size(); - Vec4d C = (UR + LR + LL) / 3.0; + Vec5d C = (UR + LR + LL) / 3.0; if (seg_len(UR, C) > min_len || seg_len(LR, C) > min_len || seg_len(LL, C) > min_len) points2.push_back(C); // Add points from triangle center to each point added so far @@ -414,12 +421,12 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { depthMat.copyTo(workMat); // Resize depthMat, all info in it is now lost, and have only zeros - depthMat = cv::Mat(workMat.rows + num_new_rows, workMat.cols, CV_64FC4, Zero); + depthMat = cv::Mat_(workMat.rows + num_new_rows, workMat.cols, Zero); // Copy the relevant chunk from workMat for (int row = 0; row < workMat.rows; row++) { for (int col = 0; col < workMat.cols; col++) { - depthMat.at(row, col) = workMat.at(row, col); + depthMat.at(row, col) = workMat.at(row, col); } } @@ -431,8 +438,8 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { if (count >= num_points) break; - depthMat.at(row, col) = points[count]; - depthMat.at(row, col)[3] = -1; // so that the mesh does not have it as a face + depthMat.at(row, col) = points[count]; + depthMat.at(row, col)[3] = -1; // so that the mesh does not have it as a face } // Need to make sure to break the outer loop too @@ -442,9 +449,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { if (count != num_points) LOG(FATAL) << "Bookkeeping error. Did not add all the points to the depth map."; } -#if 0 // Turning this off till it is decided if to include happly.h or not - -// Form a mesh. Ignore (x, y, z, i) values with (x, y, z) = (0, 0, 0). +// Form a mesh. Ignore (x, y, z, i, w) values with (x, y, z) = (0, 0, 0). // Add a vertex but not a face if i is negative. 
void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { size_t vertex_count = 0; @@ -459,10 +464,10 @@ void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); // Add a vertex even though no face has it as a corner add_vertex(UL, pix_ul, pix_to_vertex, vertex_count, vertices, colors); @@ -502,7 +507,6 @@ void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { std::cout << "Writing: " << plyFileName << std::endl; ply.write(plyFileName, happly::DataFormat::ASCII); } -#endif // Find the median of a sorted vector // TODO(oalexan1): May need to put here a partial check that the vector is sorted @@ -518,7 +522,7 @@ double find_median(std::vector const& v) { // Throw out as outliers points whose x, y, or z values differs // from the median of such values by more than this distance. // This can be expensive for win >= 25. 
-void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int win, double delta) { +void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero, int win, double delta) { // Copy it to workMat, with the latter not being modified below depthMat.copyTo(workMat); @@ -527,7 +531,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w for (int col = 0; col < workMat.cols; col++) { std::vector dist_x, dist_y, dist_z; - Vec4d pt = workMat.at(row, col); + Vec5d pt = workMat.at(row, col); // Is an outlier already if (zeroXyz(pt)) continue; @@ -538,7 +542,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (row + irow < 0 || row + irow >= workMat.rows) continue; if (col + icol < 0 || col + icol >= workMat.cols) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); if (zeroXyz(curr_pt)) continue; @@ -550,7 +554,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (dist_x.size() < std::max(2, std::min(5, win))) { // so few neighbors, could just toss it out - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } @@ -560,7 +564,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (std::abs(find_median(dist_x) - pt[0]) > delta || std::abs(find_median(dist_y) - pt[1]) > delta || std::abs(find_median(dist_z) - pt[2]) > delta) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; } } } @@ -571,7 +575,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w // ray from the existing point to the new point being almost parallel // to existing nearby rays (emanating from camera center) which is not // good. 
-void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Zero, int radius1, int radius2, +void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, int radius2, double foreshortening_delta) { // Copy depthMat to workMat, with the latter unchanged below depthMat.copyTo(workMat); @@ -581,8 +585,8 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z for (int col = 0; col < depthMat.cols; col++) { // Points which do not need to be filled can be skipped // and let them keep their existing value - if (!zeroXyz(workMat.at(row, col))) { - depthMat.at(row, col) = workMat.at(row, col); + if (!zeroXyz(workMat.at(row, col))) { + depthMat.at(row, col) = workMat.at(row, col); continue; } @@ -590,7 +594,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z // A point on an axis counts towards both quadrants it touches. cv::Mat valid(cv::Size(2, 2), CV_8U, cv::Scalar(0)); - Vec4d pt_sum = Zero; + Vec5d pt_sum = Zero; double wt_sum = 0.0; for (int irow = -radius1; irow <= radius1; irow++) { @@ -604,7 +608,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z // Look only within given radius if (dist > static_cast(radius1)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); // Note that these are not exclusive, as points on axes // can be in multiple quadrants @@ -653,11 +657,11 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z if (num_good_quadrants < 4) will_accept = false; if (!will_accept || wt_sum == 0.0) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } - Vec4d new_pt = pt_sum / wt_sum; + Vec5d new_pt = pt_sum / wt_sum; Eigen::Vector3d Z(0, 0, 1); // See if this point results in rays which are too parallel to existing rays @@ -672,7 +676,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, 
double sigma, Vec4d const& Z // Check only within given radius if (dist > static_cast(radius2)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); if (zeroXyz(curr_pt)) continue; // Not a valid point @@ -696,18 +700,18 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z } if (!will_accept) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } // accept - depthMat.at(row, col) = new_pt; + depthMat.at(row, col) = new_pt; } } } // Smooth newly added points and also their immediate neighbors -void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Zero, +void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius) { // Copy depthMat to workMat, with the latter not being changed below. // Note how we use origMat to keep track of points which did not @@ -725,7 +729,7 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (row + irow < 0 || row + irow >= depthMat.rows) continue; if (col + icol < 0 || col + icol >= depthMat.cols) continue; - if (zeroXyz(origMat.at(row + irow, col + icol))) { + if (zeroXyz(origMat.at(row + irow, col + icol))) { isNew = true; break; } @@ -736,9 +740,9 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (!isNew) continue; // Do not smooth points which have no good value to start with - if (zeroXyz(workMat.at(row, col))) continue; + if (zeroXyz(workMat.at(row, col))) continue; - Vec4d pt_sum = Zero; + Vec5d pt_sum = Zero; double wt_sum = 0.0; for (int irow = -radius; irow <= radius; irow++) { @@ -751,7 +755,7 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (dist > static_cast(radius)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); 
if (zeroXyz(curr_pt)) continue; // Not a valid point @@ -764,8 +768,37 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa } } - Vec4d new_pt = pt_sum / wt_sum; - depthMat.at(row, col) = new_pt; + Vec5d new_pt = pt_sum / wt_sum; + depthMat.at(row, col) = new_pt; + } + } +} + +// Add weights which are higher at image center and decrease towards +// boundary. This makes voxblox blend better. +void calc_weights(cv::Mat& depthMat, double exponent) { + for (int row = 0; row < depthMat.rows; row++) { +#pragma omp parallel for + for (int col = 0; col < depthMat.cols; col++) { + if (zeroXyz(depthMat.at(row, col))) continue; + + double drow = row - depthMat.rows / 2.0; + double dcol = col - depthMat.cols / 2.0; + double dist_sq = drow * drow + dcol * dcol; + + // Some kind of function decaying from center + float weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); + + // Also, points that are further in z are given less weight + double z = depthMat.at(row, col)[2]; + weight *= 1.0 / (0.001 + z * z); + + // Make sure the weight does not get too small as voxblox + // will read it as a float. Here making the weights + // just a little bigger than what voxblox will accept. 
+ weight = std::max(weight, static_cast(1.1e-6)); + + depthMat.at(row, col)[4] = weight; } } } @@ -773,8 +806,9 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat const& haz_cam_intensity, Eigen::Affine3d const& hazcam_depth_to_image_transform, int depth_exclude_columns, int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, - std::vector const& median_filter_params, bool save_debug_data, - const char* filename_buffer, Eigen::MatrixXd const& desired_cam_to_world_trans) { + double reliability_weight_exponent, std::vector const& median_filter_params, + bool save_debug_data, const char* filename_buffer, + Eigen::MatrixXd const& desired_cam_to_world_trans) { // Sanity checks if (static_cast(haz_cam_intensity.cols) != static_cast(pc_msg->width) || static_cast(haz_cam_intensity.rows) != static_cast(pc_msg->height)) @@ -789,11 +823,13 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Two constants float inf = std::numeric_limits::infinity(); - Vec4d Zero(0, 0, 0, 0); + Vec5d Zero(0, 0, 0, 0, 0); - // A matrix storing the depth cloud and a temporary work matrix - cv::Mat depthMat(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_64FC4, Zero); - cv::Mat workMat(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_64FC4, Zero); + // A matrix storing the depth cloud and a temporary work matrix. + // These have 5 channels: x, y, z, intensity, and weight. The weight + // determines the reliability of a point. 
+ cv::Mat_ depthMat(haz_cam_intensity.rows, haz_cam_intensity.cols, Zero); + cv::Mat_ workMat(haz_cam_intensity.rows, haz_cam_intensity.cols, Zero); // Populate depthMat int count = -1; @@ -809,9 +845,9 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); if ((x == 0 && y == 0 && z == 0) || skip) - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; else - depthMat.at(row, col) = Vec4d(x, y, z, i); + depthMat.at(row, col) = Vec5d(x, y, z, i, 0); } } @@ -838,20 +874,22 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co int radius = depth_hole_fill_diameter / 4.0; if (depth_hole_fill_diameter > 0) smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); + // This is the right place at which to add weights, before adding + // extra points messes up with the number of rows in in depthMat. + calc_weights(depthMat, reliability_weight_exponent); + // Add extra points, but only if we are committed to manipulating the // depth cloud to start with if (depth_hole_fill_diameter > 0) add_extra_pts(depthMat, workMat, Zero); -#if 0 if (save_debug_data) { // Save the updated depthMat as a mesh (for debugging) std::string plyFileName = filename_buffer + std::string(".ply"); save_mesh(depthMat, plyFileName); } -#endif // Initialize point cloud that we will pass to voxblox - pcl::PointCloud pci; + pcl::PointCloud pci; pci.width = depthMat.cols; pci.height = depthMat.rows; pci.points.resize(pci.width * pci.height); @@ -861,7 +899,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co for (int col = 0; col < depthMat.cols; col++) { count++; - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); // An outlier if (zeroXyz(pt)) { @@ -869,7 +907,10 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pci.points[count].x = inf; pci.points[count].y = inf; 
pci.points[count].z = inf; - pci.points[count].intensity = 0; + pci.points[count].normal_x = 0; // intensity + pci.points[count].normal_y = 0; // weight + pci.points[count].normal_z = 0; // ensure initialization + pci.points[count].curvature = 0; // ensure initialization continue; } @@ -880,8 +921,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pci.points[count].z = X[2]; if (pt[3] <= 0) pt[3] = 100; // Ensure a positive value for the color + if (pt[4] < 0) pt[4] = 0; // Ensure a non-negative weight - pci.points[count].intensity = pt[3]; + pci.points[count].normal_x = pt[3]; // intensity + pci.points[count].normal_y = pt[4]; // weight + pci.points[count].normal_z = 0; // ensure initialization + pci.points[count].curvature = 0; // ensure initialization } } @@ -893,11 +938,16 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Find the image of distances in depthMat, for debugging. Note // that we ignore the extra points we added as new rows depthMat in // add_extra_pts(). + + // The depthMat may have extra rows, but we assume the number of columns + // did not change. + if (haz_cam_intensity.cols != depthMat.cols) LOG(FATAL) << "Incorrect number of columns in depthMat\n"; + cv::Mat depthDist(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_32FC1, cv::Scalar(0)); count = -1; for (int row = 0; row < depthDist.rows; row++) { for (int col = 0; col < depthDist.cols; col++) { - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); Eigen::Vector3d eigen_pt(pt[0], pt[1], pt[2]); double dist = eigen_pt.norm(); depthDist.at(row, col) = dist; @@ -930,11 +980,10 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co std::cout << "Writing: " << fileName << std::endl; cv::imwrite(fileName, 255.0 * (depthDist - min_dist) / (max_dist - min_dist)); -#if 0 // Transform (modifies depthMat) and save the transformed mesh. For debugging. 
for (int row = 0; row < depthMat.rows; row++) { for (int col = 0; col < depthMat.cols; col++) { - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); if (zeroXyz(pt)) continue; @@ -949,17 +998,15 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co V = desired_cam_to_world_trans * V; for (size_t it = 0; it < 3; it++) - depthMat.at(row, col)[it] = V[it]; + depthMat.at(row, col)[it] = V[it]; } } std::string plyFileName = filename_buffer + std::string("_trans.ply"); save_mesh(depthMat, plyFileName); -#endif } } - #if 0 // This is an instructive code to add to saveCameraPoses() @@ -1088,6 +1135,7 @@ void localize_sci_cam_directly() { Eigen::Affine3d& hazcam_depth_to_image_transform, int depth_exclude_columns, int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, std::vector const& median_filter_params, bool save_debug_data, std::vector const& nav_cam_msgs, @@ -1282,8 +1330,9 @@ void localize_sci_cam_directly() { // Append the intensity to the point cloud and save it save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, depth_exclude_columns, - depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, median_filter_params, - save_debug_data, filename_buffer, desired_cam_to_world_trans); + depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, + reliability_weight_exponent, median_filter_params, save_debug_data, filename_buffer, + desired_cam_to_world_trans); // Save the name of the point cloud to the index ofs << filename_buffer << "\n"; @@ -1424,6 +1473,9 @@ void localize_sci_cam_directly() { "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " "are fused. 
It is suggested to not make this too big, as more hole-filling happens on the fused mesh " "later (--max_hole_diameter)."); + DEFINE_double(reliability_weight_exponent, 2.0, + "A larger value will give more weight to depth points corresponding to pixels closer to depth image " + "center, which are considered more reliable."); DEFINE_bool(simulated_data, false, "If specified, use data recorded in simulation. " "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); @@ -1587,7 +1639,9 @@ void localize_sci_cam_directly() { double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, sci_cam_exif, FLAGS_output_dir, "", // not needed @@ -1603,7 +1657,9 @@ void localize_sci_cam_directly() { double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, sci_cam_exif, 
FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, @@ -1625,7 +1681,9 @@ void localize_sci_cam_directly() { dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index bd384ea2..0cf81c17 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -310,13 +310,13 @@ def process_args(args): def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): - # Check if the environemnt was set - for var in ( + # Check if the environment was set + for var in [ "ASTROBEE_RESOURCE_DIR", "ASTROBEE_CONFIG_DIR", "ASTROBEE_WORLD", "ASTROBEE_ROBOT", - ): + ]: if var not in os.environ: raise Exception("Must set " + var) diff --git a/licenses.csv b/licenses.csv index 6b7bedae..68e84db3 100644 --- a/licenses.csv +++ b/licenses.csv @@ -1,5 +1,6 @@ Type,Name,URL,License,Modified?,Notes Library (external),MVS-Texturing, https://github.com/nmoehrle/mvs-texturing.git,BSD 3-Clause License,YES,for geometric mapper just awaiting pull request to be accepted +Library (external),happly,https://github.com/nmwsharp/happly,MIT license,No,Utility for writing ply files Library (external),libgp,https://github.com/mblum/libgp,BSD-3-Clause License,NO,used for volumetric mapper 
Library (external),https://github.com/bmegli/wifi-scan,MPL-2.0 License,YES,modified to the point we refactored all their code Library (external),pytorch,https://github.com/pytorch/pytorch,Modified BSD license,NO,used for image classification @@ -38,4 +39,4 @@ ROS package,catkin,https://github.com/catkin/catkin_tools,Apache-2.0 License,NO, ROS package,pcl,https://github.com/ros-gbp/pcl-release,BSD license,NO,None Build tool,GCC / G++ / OpenMP,http://www.openmp.org ,GPLv3,NO,None -Build tool,Doxygen,http://www.stack.nl/~dimitri/doxygen,GPLv2,NO,None \ No newline at end of file +Build tool,Doxygen,http://www.stack.nl/~dimitri/doxygen,GPLv2,NO,None diff --git a/scripts/git/cpplint_repo.py b/scripts/git/cpplint_repo.py old mode 100755 new mode 100644 index 7ca56870..f329a256 --- a/scripts/git/cpplint_repo.py +++ b/scripts/git/cpplint_repo.py @@ -72,7 +72,7 @@ def main(): ): continue for filename in filenames: - if "TinyEXIF" in filename: + if "TinyEXIF" or "happly" in filename: continue if not filename.endswith( ( diff --git a/scripts/git/pre-commit.linter_cpp b/scripts/git/pre-commit.linter_cpp index 7eb616d4..cd15d64f 100755 --- a/scripts/git/pre-commit.linter_cpp +++ b/scripts/git/pre-commit.linter_cpp @@ -102,6 +102,7 @@ def main(): and not "debian" in filename and not "build" in filename and not "TinyEXIF" in filename + and not "happly" in filename ): cpp_files_to_lint.append(filename) diff --git a/scripts/git/pre-commit.linter_python b/scripts/git/pre-commit.linter_python index 06e4069e..9fbb6774 100755 --- a/scripts/git/pre-commit.linter_python +++ b/scripts/git/pre-commit.linter_python @@ -29,36 +29,38 @@ fi failed_lint=false -if ! command -v black &> /dev/null -then - echo "Black was not found, to install check https://github.com/psf/black" - echo "Unfortunately black requires Python 3.6.2+ to run" +command -v black > /dev/null 2>&1 +ans=$? +if [ "$ans" -ne "0" ]; then + echo "'black' was not found. To install, check https://github.com/psf/black." 
+ echo "Note that black requires Python 3.6.2+ to run." exit 0 fi -if ! command -v isort &> /dev/null -then - echo "isort was not found, to install check https://github.com/PyCQA/isort" +command -v isort > /dev/null 2>&1 +ans=$? +if [ "$ans" -ne "0" ]; then + echo "'isort' was not found. To install, check https://github.com/PyCQA/isort." exit 0 fi echo "==================================================" -echo " Analysing with black" +echo " Analysing python code style with 'black'." # This check the files but they will not be commited if `black . --include ${files} --check --quiet`; then - echo "Linter black checks passed" + echo "Linter checks using 'black' passed." else - echo "Errors detected, fixing ..." - black . --include ${files} - failed_lint=true + echo "Errors detected with 'black'. Fixing them. Try to add and commit your files again." + black . --include ${files} + failed_lint=true fi echo "==================================================" -echo " Analysing with isort" +echo " Analysing python code style with 'isort'." if $(isort ${files} --extend-skip cmake --profile black --diff --check-only --quiet >/dev/null); then - echo "Linter isort checks passed" + echo "Linter checks using 'isort' passed." else - echo "Errors detected, fixing ..." + echo "Errors detected with 'isort'. Fixing them. Try to add and commit your files again." 
isort ${files} --extend-skip cmake --profile black >/dev/null failed_lint=true fi From d483c6dc74ed5427d16b6ac72094a6055eaa536b Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 2 Dec 2021 18:53:35 -0800 Subject: [PATCH 05/40] geometry_mapper.py: expose to user the reliability weight option --- .../geometry_mapper/tools/geometry_mapper.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 0cf81c17..d932d0d2 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -167,6 +167,12 @@ def process_args(args): default="30", help="Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).", ) + parser.add_argument( + "--reliability_weight_exponent", + dest="reliability_weight_exponent", + default="2", + help="A larger value will give more weight to depth points corresponding to pixels closer to depth image center, which are considered more reliable.", + ) parser.add_argument( "--max_ray_length", dest="max_ray_length", @@ -181,6 +187,13 @@ def process_args(args): help="When fusing the depth point clouds use a voxel of this size, " + "in meters.", ) + parser.add_argument( + "--voxblox_integrator", + dest="voxblox_integrator", + default="merged", + help="When fusing the depth point clouds use this VoxBlox method. 
" + + 'Options are: "merged", "simple", and "fast".', + ) parser.add_argument( "--max_iso_times_exposure", dest="max_iso_times_exposure", @@ -424,6 +437,8 @@ def compute_poses_and_clouds(geometry_mapper_path, args): args.foreshortening_delta, "--depth_hole_fill_diameter", args.depth_hole_fill_diameter, + "--reliability_weight_exponent", + args.reliability_weight_exponent, "--median_filters", args.median_filters, ] + args.localization_options.split(" ") @@ -464,7 +479,14 @@ def fuse_clouds(batch_tsdf_path, mesh, args): haz_cam_index = os.path.join(args.output_dir, "haz_cam_index.txt") - cmd = [batch_tsdf_path, haz_cam_index, mesh, args.max_ray_length, args.voxel_size] + cmd = [ + batch_tsdf_path, + haz_cam_index, + mesh, + args.max_ray_length, + args.voxel_size, + args.voxblox_integrator, + ] log_file = os.path.join(args.output_dir, "voxblox_log.txt") print( From 1b3dd053b8978e92c9176b52c0bd8533b4e2c17a Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 2 Dec 2021 19:01:03 -0800 Subject: [PATCH 06/40] Minor geometry mapper style tweaks --- dense_map/geometry_mapper/tools/geometry_mapper.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index ca56c9a4..1f460476 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -1474,8 +1474,8 @@ void localize_sci_cam_directly() { "are fused. 
It is suggested to not make this too big, as more hole-filling happens on the fused mesh " "later (--max_hole_diameter)."); DEFINE_double(reliability_weight_exponent, 2.0, - "A larger value will give more weight to depth points corresponding to pixels closer to depth image " - "center, which are considered more reliable."); + "A larger value will give more weight to depth points corresponding to " + "pixels closer to depth image center, which are considered more reliable."); DEFINE_bool(simulated_data, false, "If specified, use data recorded in simulation. " "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); From 3d7675bacab24016001e9a8b9ddce125688bac94 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 2 Dec 2021 19:26:55 -0800 Subject: [PATCH 07/40] Make cpplint_repo.py executable --- scripts/git/cpplint_repo.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/git/cpplint_repo.py diff --git a/scripts/git/cpplint_repo.py b/scripts/git/cpplint_repo.py old mode 100644 new mode 100755 From 9394809e88b02ef8ef55328f1e9980cf53a2ae30 Mon Sep 17 00:00:00 2001 From: Trey Smith Date: Mon, 6 Dec 2021 18:16:33 -0800 Subject: [PATCH 08/40] pano_orientations.py: add logic for 1-row or 1-column panoramas --- .../inspection/scripts/pano_orientations.py | 130 +++++++++++++----- 1 file changed, 99 insertions(+), 31 deletions(-) diff --git a/astrobee/behaviors/inspection/scripts/pano_orientations.py b/astrobee/behaviors/inspection/scripts/pano_orientations.py index 801b259b..36a57cb1 100755 --- a/astrobee/behaviors/inspection/scripts/pano_orientations.py +++ b/astrobee/behaviors/inspection/scripts/pano_orientations.py @@ -31,18 +31,26 @@ def toRPY(rot): def pano1D(rangeMin, rangeMax, fov, overlap): """ Returns image center coordinates such that images cover the range - @rangeMin .. @rangeMax with at least the specified @overlap. + @rangeMin .. @rangeMax with at least the specified @overlap. 
If one + image suffices, it will be centered between @rangeMin and @rangeMax (and + the edges of the image will beyond the range if it is smaller than @fov). + If more than one image is needed to cover the range, the boundary images + will cover exactly to the edges of the specified range and the images + will be evenly spaced. :param float rangeMin: Minimum angle of minimum image (degrees). :param float rangeMax: Maximum angle of maximum image (degrees). :param float fov: Field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion - of the image field of view (0 .. 1). :return: A vector of orientations of - image centers. + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :return: A vector of orientations of image centers. """ - assert rangeMin < rangeMax + assertLte(rangeMin, rangeMax, EPS) W = rangeMax - rangeMin + if W < fov: + # Special case: Only one image needed. Center it. + return np.array([0.5 * (rangeMin + rangeMax)]) + # sufficient overlap criterion: stride <= fov * (1 - overlap) # (k - 1) * stride + fov = W # stride = (W - fov) / (k - 1) @@ -54,14 +62,18 @@ def pano1D(rangeMin, rangeMax, fov, overlap): if 1: # optional sanity checks + stride = (W - fov) / (k - 1) assertLte(stride, fov * (1 - overlap), EPS) # sufficient overlap - if k > 2: - stride1 = (W - fov) / (k - 2) - assertGte(stride1, fov * (1 - overlap), EPS) # k is minimized + # check if we have more images than necessary + if k == 1: + pass # obviously need at least one image elif k == 2: assertLte(fov, W, EPS) # k = 1 is not enough + else: + stride1 = (W - fov) / (k - 2) + assertGte(stride1, fov * (1 - overlap), EPS) # k is minimized minCenter = rangeMin + fov / 2 maxCenter = rangeMax - fov / 2 @@ -75,7 +87,7 @@ def pano1DCompletePan(fov, overlap): including at the wrap-around, and the image sequence is centered at pan = 0. 
:param float fov: Field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). :return: A vector of orientations of image centers. """ k = math.ceil(360 / (fov * (1 - overlap))) @@ -98,7 +110,7 @@ def panoOrientations(panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preT :param float tiltMax: Tilt angle of top edge of top row (degrees). :param float hFov: Horizontal field of view of each image (degrees). :param float vFov: Vertical field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). :param float preTilt: Offsets the (pan, tilt) = (0, 0) center in tilt (degrees) so as to efficiently capture panoramas centered near tilt = +/- 90. Example: When preTilt = -90, tilt = 0 is offset to point straight down. :return: (imageCenters, ncols, nrows). A list of orientations of image centers, the number of columns in the panorama, and the number of rows. 
""" @@ -159,34 +171,46 @@ def checkPano(pano, panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preTi imageCenters, ncols, nrows = pano topLeft = getEuler(imageCenters[0], preTilt) - p11 = getEuler(imageCenters[nrows * 1 + 1], preTilt) - bottomRight = getEuler(imageCenters[-1], preTilt) - panStride = p11[YAW] - topLeft[YAW] - tiltStride = -(p11[PITCH] - topLeft[PITCH]) + if ncols == 1: + assertLte(panMax - panMin, hFov, EPS) # one column is enough + else: + nextPan = getEuler(imageCenters[nrows], preTilt) + panStride = nextPan[YAW] - topLeft[YAW] + assertLte(panStride, hFov * (1 - overlap), EPS) # pan overlaps enough - assertLte(panStride, hFov * (1 - overlap), EPS) # pan overlaps enough - assertLte(tiltStride, vFov * (1 - overlap), EPS) # tilt overlaps enough + if nrows == 1: + assertLte(tiltMax - tiltMin, vFov, EPS) # one row is enough + else: + nextTilt = getEuler(imageCenters[1], preTilt) + tiltStride = -(nextTilt[PITCH] - topLeft[PITCH]) + assertLte(tiltStride, vFov * (1 - overlap), EPS) # tilt overlaps enough - # we should not be able to remove a column - if ncols > 2: - if panMin == -180 and panMax == 180: - panStride1 = 360 / (ncols - 1) - else: - panStride1 = ((panMax - panMin) - hFov) / (ncols - 2) - assertGte(panStride1, hFov * (1 - overlap), EPS) # ncols is minimized + # we shouldn't be able to remove a column + if ncols == 1: + pass # obviously can't remove single column elif ncols == 2: if panMin == -180 and panMax == 180: assertLte(hFov * (1 - overlap), 360) # one column is not enough else: assertLte(hFov, panMax - panMin, EPS) # one column is not enough + else: + if panMin == -180 and panMax == 180: + panStride1 = 360 / (ncols - 1) + else: + panStride1 = ((panMax - panMin) - hFov) / (ncols - 2) + assertGte(panStride1, hFov * (1 - overlap), EPS) # ncols is minimized - # we should not be able to remove a row - if nrows > 2: - tiltStride1 = ((tiltMax - tiltMin) - vFov) / (nrows - 2) - assertGte(tiltStride1, vFov * (1 - overlap), EPS) # nrows is 
minimized + # we shouldn't be able to remove a row + if nrows == 1: + pass # obviously can't remove single row elif nrows == 2: assertLte(vFov, tiltMax - tiltMin, EPS) # one row is not enough + else: + tiltStride1 = ((tiltMax - tiltMin) - vFov) / (nrows - 2) + assertGte(tiltStride1, vFov * (1 - overlap), EPS) # nrows is minimized + + bottomRight = getEuler(imageCenters[-1], preTilt) panCenterMin = topLeft[YAW] panCenterMax = bottomRight[YAW] @@ -195,14 +219,24 @@ def checkPano(pano, panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preTi midPan = np.mean((panCenterMin, panCenterMax)) assertEqual(midPan, 0, EPS) # centered else: - assertEqual(panCenterMin - hFov / 2, panMin, EPS) # covers to panMin - assertEqual(panCenterMax + hFov / 2, panMax, EPS) # covers to panMax + if ncols == 1: + assertEqual( + panCenterMin, 0.5 * (panMin + panMax), EPS + ) # single column is centered + else: + assertEqual(panCenterMin - hFov / 2, panMin, EPS) # covers to panMin + assertEqual(panCenterMax + hFov / 2, panMax, EPS) # covers to panMax tiltCenterMax = topLeft[PITCH] tiltCenterMin = bottomRight[PITCH] - assertEqual(tiltCenterMin - vFov / 2, tiltMin, EPS) # covers to tiltMin - assertEqual(tiltCenterMax + vFov / 2, tiltMax, EPS) # covers to tiltMax + if nrows == 1: + assertEqual( + tiltCenterMin, 0.5 * (tiltMin + tiltMax), EPS + ) # single row is centered + else: + assertEqual(tiltCenterMin - vFov / 2, tiltMin, EPS) # covers to tiltMin + assertEqual(tiltCenterMax + vFov / 2, tiltMax, EPS) # covers to tiltMax def testCase(label, config): @@ -316,3 +350,37 @@ def testCase(label, config): "preTilt": -90, }, ) + +# == TEST CASE 5 == +# Test special-case logic for 1-row panorama. + +testCase( + "1-row panorama", + { + "panMin": -60, + "panMax": 60, + "tiltMin": 10, + "tiltMax": 20, + "hFov": 60, + "vFov": 60, + "overlap": 0.3, + "preTilt": 0, + }, +) + +# == TEST CASE 6 == +# Test special-case logic for 1-column panorama. 
+ +testCase( + "1-column panorama", + { + "panMin": 0, + "panMax": 0, + "tiltMin": -60, + "tiltMax": 60, + "hFov": 60, + "vFov": 60, + "overlap": 0.3, + "preTilt": 0, + }, +) From 2af334714cfbf99d1b120eec7088ca4b4d12a6b8 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Tue, 7 Dec 2021 16:12:44 -0800 Subject: [PATCH 09/40] Panorama parameters + other fixes (#4) * renaming filder to match package name * removing redundant pico proxy * installing env_wrapper as program * making it include catkin * making it cross compile * panorama configurable * dealing with 2D inspections * make inspection talk directly with apk * fixing matrix indexes, cleaning up * adding parameter configuration * adding parameter description --- astrobee/behaviors/cargo/CMakeLists.txt | 10 +- astrobee/behaviors/cargo/src/cargo_node.cc | 4 +- astrobee/behaviors/inspection/CMakeLists.txt | 1 - .../include/inspection/inspection.h | 20 +- .../inspection/resources/panorama_granite.txt | 2 + .../behaviors/inspection/src/inspection.cc | 231 ++++++++++++------ .../inspection/src/inspection_node.cc | 63 ++--- .../inspection/tools/inspection_tool.cc | 31 ++- .../{gazebo => isaac_gazebo}/CMakeLists.txt | 0 .../launch/spawn_isaac.launch | 0 .../launch/spawn_object.launch | 0 .../launch/start_simulation.launch | 0 .../{gazebo => isaac_gazebo}/package.xml | 0 .../{gazebo => isaac_gazebo}/readme.md | 0 .../gazebo_model_plugin_cargo.cc | 0 .../gazebo_sensor_plugin_RFID_receiver.cc | 0 .../gazebo_sensor_plugin_RFID_tag.cc | 0 .../gazebo_sensor_plugin_air_quality.cc | 0 .../gazebo_sensor_plugin_heat_cam.cc | 0 .../gazebo_sensor_plugin_wifi_receiver.cc | 0 .../gazebo_sensor_plugin_wifi_transmitter.cc | 0 .../worlds/granite.world | 0 .../{gazebo => isaac_gazebo}/worlds/iss.world | 0 .../{gazebo => isaac_gazebo}/worlds/r2.world | 0 cmake/catkin2Config.cmake | 18 +- isaac/CMakeLists.txt | 2 +- isaac/config/behaviors/inspection.config | 112 ++++----- 
isaac/launch/robot/ILP.launch | 10 - 28 files changed, 289 insertions(+), 215 deletions(-) create mode 100644 astrobee/behaviors/inspection/resources/panorama_granite.txt mode change 100755 => 100644 astrobee/behaviors/inspection/src/inspection.cc mode change 100755 => 100644 astrobee/behaviors/inspection/src/inspection_node.cc rename astrobee/simulation/{gazebo => isaac_gazebo}/CMakeLists.txt (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/spawn_isaac.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/spawn_object.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/start_simulation.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/package.xml (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/readme.md (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/granite.world (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/iss.world (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/r2.world (100%) diff --git 
a/astrobee/behaviors/cargo/CMakeLists.txt b/astrobee/behaviors/cargo/CMakeLists.txt index d2c555cf..f5d682c8 100644 --- a/astrobee/behaviors/cargo/CMakeLists.txt +++ b/astrobee/behaviors/cargo/CMakeLists.txt @@ -23,9 +23,8 @@ project(cargo) add_compile_options(-std=c++14) ## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS +SET(catkin2_DIR "${CMAKE_SOURCE_DIR}/../../../cmake") +find_package(catkin2 REQUIRED COMPONENTS roscpp std_msgs nodelet @@ -98,11 +97,6 @@ install(DIRECTORY launch/ DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/launch PATTERN ".svn" EXCLUDE) -# Mark resources files for installation -install(DIRECTORY resources/ - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/resources - PATTERN ".svn" EXCLUDE) - # Required plugin descriptor file for nodelet install(FILES nodelet_plugins.xml DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} diff --git a/astrobee/behaviors/cargo/src/cargo_node.cc b/astrobee/behaviors/cargo/src/cargo_node.cc index 105c4a17..960f608d 100755 --- a/astrobee/behaviors/cargo/src/cargo_node.cc +++ b/astrobee/behaviors/cargo/src/cargo_node.cc @@ -95,7 +95,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { GOAL_UNPAUSE = (1<<6), // Resume an existing goal MOTION_SUCCESS = (1<<7), // Mobility motion action success MOTION_FAILED = (1<<8), // Mobility motion action problem - ARM_SUCCESS = (1<<9), // Arm motion action success + ARM_SUCCESS = (1<<9), // Arm motion action success ARM_FAILED = (1<<10), // Arm motion action problem DETECT_SUCCESS = (1<<11), // Detection AR tag success DETECT_FAILED = (1<<12), // Detection AR tag failed @@ -442,7 +442,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { } void GroundConnectedCallback() { - ROS_ERROR_STREAM("GroundConnectedCallback()"); + NODELET_DEBUG_STREAM("GroundConnectedCallback()"); if (!client_d_.IsConnected()) return; // Detection action 
ground_active_ = true; } diff --git a/astrobee/behaviors/inspection/CMakeLists.txt b/astrobee/behaviors/inspection/CMakeLists.txt index e77fd7f5..c5a10e8d 100644 --- a/astrobee/behaviors/inspection/CMakeLists.txt +++ b/astrobee/behaviors/inspection/CMakeLists.txt @@ -23,7 +23,6 @@ project(inspection) add_compile_options(-std=c++14) ## Find catkin macros and libraries -find_package(catkin REQUIRED) # defines catkin_DIR SET(catkin2_DIR "${CMAKE_SOURCE_DIR}/../../../cmake") find_package(catkin2 REQUIRED COMPONENTS roscpp diff --git a/astrobee/behaviors/inspection/include/inspection/inspection.h b/astrobee/behaviors/inspection/include/inspection/inspection.h index 766d05d0..f833bdec 100755 --- a/astrobee/behaviors/inspection/include/inspection/inspection.h +++ b/astrobee/behaviors/inspection/include/inspection/inspection.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -87,7 +88,9 @@ namespace inspection { class Inspection { public: // Constructor - Inspection(ros::NodeHandle* nh, ff_util::ConfigServer cfg); + Inspection(ros::NodeHandle* nh, ff_util::ConfigServer* cfg); + // Read parameters from config server + void ReadParam(); // Generate inspection segment bool GenSegment(geometry_msgs::Pose goal); // Remove head of segment if planing failed @@ -134,6 +137,10 @@ class Inspection { geometry_msgs::PoseArray points_; // Vector containing inspection poses + // Parameter clients + ff_util::ConfigServer *cfg_; + config_reader::ConfigReader cfg_cam_; + // Inspection parameters double opt_distance_; double dist_resolution_; @@ -143,8 +150,15 @@ class Inspection { double min_distance_; double horizontal_fov_; double aspect_ratio_; - double vent_size_x_; - double vent_size_y_; + double target_size_x_; + double target_size_y_; + + // Panorame parameters + double pan_min_; + double pan_max_; + double tilt_min_; + double tilt_max_; + double overlap_; // Publish Markers ros::Publisher pub_no_filter_; diff --git 
a/astrobee/behaviors/inspection/resources/panorama_granite.txt b/astrobee/behaviors/inspection/resources/panorama_granite.txt new file mode 100644 index 00000000..af8530ba --- /dev/null +++ b/astrobee/behaviors/inspection/resources/panorama_granite.txt @@ -0,0 +1,2 @@ +# Panorama +0 0 -0.772 0 0 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc old mode 100755 new mode 100644 index dee28cbc..eb74c9a0 --- a/astrobee/behaviors/inspection/src/inspection.cc +++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -20,7 +20,7 @@ // Include inspection library header #include #define PI 3.1415 - +#define EPS 1e-5 /** * \ingroup beh */ @@ -38,7 +38,14 @@ namespace inspection { It also constains functions that allow inspection visualization. */ - Inspection::Inspection(ros::NodeHandle* nh, ff_util::ConfigServer cfg) { + Inspection::Inspection(ros::NodeHandle* nh, ff_util::ConfigServer* cfg) { + // Setug config readers + cfg_ = cfg; + + cfg_cam_.AddFile("cameras.config"); + if (!cfg_cam_.ReadFiles()) + ROS_FATAL("Failed to read config files."); + // Create a transform buffer to listen for transforms tf_listener_ = std::shared_ptr( new tf2_ros::TransformListener(tf_buffer_)); @@ -61,24 +68,29 @@ namespace inspection { "markers/zones_check", 1, true); pub_map_check_ = nh->advertise( "markers/map_check", 1, true); + } + void Inspection::ReadParam() { // Parameters Anomaly survey - opt_distance_ = cfg.Get("optimal_distance"); - dist_resolution_ = cfg.Get("distance_resolution"); - angle_resolution_ = cfg.Get("angle_resolution"); - max_angle_ = cfg.Get("max_angle"); - max_distance_ = cfg.Get("max_distance"); - min_distance_ = cfg.Get("min_distance"); - horizontal_fov_ = cfg.Get("horizontal_fov"); - aspect_ratio_ = cfg.Get("aspect_ratio"); - vent_size_x_ = cfg.Get("vent_size_x"); - vent_size_y_ = cfg.Get("vent_size_y"); - vent_to_scicam_rot_ = 
tf2::Quaternion(cfg.Get("vent_to_sci_cam_rotation_x"), - cfg.Get("vent_to_sci_cam_rotation_y"), - cfg.Get("vent_to_sci_cam_rotation_z"), - cfg.Get("vent_to_sci_cam_rotation_w")); - - // Parameters Panorama survey + opt_distance_ = cfg_->Get("optimal_distance"); + dist_resolution_ = cfg_->Get("distance_resolution"); + angle_resolution_ = cfg_->Get("angle_resolution"); + max_angle_ = cfg_->Get("max_angle"); + max_distance_ = cfg_->Get("max_distance"); + min_distance_ = cfg_->Get("min_distance"); + target_size_x_ = cfg_->Get("target_size_x"); + target_size_y_ = cfg_->Get("target_size_y"); + vent_to_scicam_rot_ = tf2::Quaternion(cfg_->Get("vent_to_sci_cam_rotation_x"), + cfg_->Get("vent_to_sci_cam_rotation_y"), + cfg_->Get("vent_to_sci_cam_rotation_z"), + cfg_->Get("vent_to_sci_cam_rotation_w")); + + // Parameters Panorama survey + pan_min_ = cfg_->Get("pan_min") * PI / 180.0; + pan_max_ = cfg_->Get("pan_max") * PI / 180.0; + tilt_min_ = cfg_->Get("tilt_min") * PI / 180.0; + tilt_max_ = cfg_->Get("tilt_max") * PI / 180.0; + overlap_ = cfg_->Get("overlap"); } // Ensure all clients are connected @@ -116,6 +128,8 @@ namespace inspection { // MOVE ACTION CLIENT // Generate inspection segment bool Inspection::GenSegment(geometry_msgs::Pose goal) { + // Update parameters + ReadParam(); // Insert Offset tf2::Transform vent_transform; vent_transform.setOrigin(tf2::Vector3( @@ -214,22 +228,46 @@ namespace inspection { // Checks the given points agains whether the target is visible // from a camera picture bool Inspection::VisibilityConstraint(geometry_msgs::PoseArray &points) { + // Get camera parameters + Eigen::Matrix3d cam_mat; + float fx, fy, s, cx, cy; + int W, H; + + config_reader::ConfigReader::Table camera(&cfg_cam_, points.header.frame_id.c_str()); + // Read in distorted image size. 
+ if (!camera.GetInt("width", &W)) + fprintf(stderr, "Could not read camera width."); + if (!camera.GetInt("height", &H)) + fprintf(stderr, "Could not read camera height."); + + config_reader::ConfigReader::Table vector(&camera, "intrinsic_matrix"); + for (int i = 0; i < 9; i++) { + if (!vector.GetReal((i + 1), &cam_mat(i / 3, i % 3))) { + fprintf(stderr, "Failed to read vector intrinsic_matrix."); + break; + } + } + // Read in focal length, optical offset and skew + fx = cam_mat(0, 0); + fy = cam_mat(1, 1); + s = cam_mat(0, 1); + cx = cam_mat(0, 2); + cy = cam_mat(1, 2); + // Build the matrix with the points to evaluate Eigen::MatrixXd p(4, 4); - p << vent_size_x_, vent_size_x_, -vent_size_x_, -vent_size_x_, - vent_size_y_, -vent_size_y_, vent_size_y_, -vent_size_y_, + p << target_size_x_, target_size_x_, -target_size_x_, -target_size_x_, + target_size_y_, -target_size_y_, target_size_y_, -target_size_y_, 0, 0, 0, 0, 1, 1, 1, 1; - // Build perspective matrix - float yScale = 1.0F / tan(horizontal_fov_ / 2); - float xScale = yScale / aspect_ratio_; + // Build projection matrix float farmnear = max_distance_ - min_distance_; Eigen::Matrix4d P; - P << xScale, 0, 0, 0, - 0, yScale, 0, 0, - 0, 0, max_distance_ / (farmnear), 1, - 0, 0, -min_distance_ * (max_distance_ / (farmnear)), 1; + P << 2*fx/W, 0, 0, 0, + 2*s/W, 2*fy/H, 0, 0, + 2*(cx/W)-1, 2*(cy/H)-1, max_distance_ / (farmnear), 1, + 0, 0, -min_distance_ * (max_distance_ / (farmnear)), 1; // Go through all the points in sorted segment std::vector::const_iterator it = points.poses.begin(); @@ -454,58 +492,103 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, publisher.publish(msg_visual); } -void Inspection::DrawInspectionFrostum() { -} + void Inspection::DrawInspectionFrostum() { + } + + // Generate the survey for panorama pictures + void Inspection::GeneratePanoramaSurvey(geometry_msgs::PoseArray &points_panorama) { + // Update parameters + ReadParam(); + + geometry_msgs::PoseArray 
panorama_relative; + geometry_msgs::PoseArray panorama_transformed; + geometry_msgs::PoseArray panorama_survey; -// Generate the survey for panorama pictures -void Inspection::GeneratePanoramaSurvey(geometry_msgs::PoseArray &points_panorama) { - geometry_msgs::PoseArray panorama_relative; - geometry_msgs::PoseArray panorama_transformed; - geometry_msgs::PoseArray panorama_survey; - - // Insert point - geometry_msgs::Pose point; - panorama_relative.header.frame_id = "sci_cam"; - point.position.x = 0.0; - point.position.y = 0.0; - point.position.z = 0.0; - tf2::Quaternion panorama_rotation; - // Go through all the points - for (double theta = -PI/2; theta <=PI/2; theta += PI/4) { - for (double phi = 0; phi < 2*PI; phi += PI/4) { - panorama_rotation.setRPY(0, theta, phi); - panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; - point.orientation.x = panorama_rotation.x(); - point.orientation.y = panorama_rotation.y(); - point.orientation.z = panorama_rotation.z(); - point.orientation.w = panorama_rotation.w(); - panorama_relative.poses.push_back(point); - if (theta == -PI/2 || theta == PI/2) + // Insert point + geometry_msgs::Pose point; + panorama_relative.header.frame_id = points_panorama.header.frame_id.c_str(); + point.position.x = 0.0; + point.position.y = 0.0; + point.position.z = 0.0; + tf2::Quaternion panorama_rotation; + + + // Get camera parameters + Eigen::Matrix3d cam_mat; + float fx, fy; + int W, H; + + // Read in distorted image size. 
+ config_reader::ConfigReader::Table camera(&cfg_cam_, points_panorama.header.frame_id.c_str()); + if (!camera.GetInt("width", &W)) { + ROS_ERROR("Could not read camera width."); + } + if (!camera.GetInt("height", &H)) { + ROS_ERROR("Could not read camera height."); + } + config_reader::ConfigReader::Table vector(&camera, "intrinsic_matrix"); + for (int i = 0; i < 9; i++) { + if (!vector.GetReal((i + 1), &cam_mat(i / 3, i % 3))) { + ROS_ERROR("Failed to read vector intrinsic_matrix."); break; + } + } + // Read in focal length + fx = cam_mat(0, 0); + fy = cam_mat(1, 1); + + // Calculate field of views + float h_fov = 2 * atan(W / (2 * fx)); + float v_fov = 2 * atan(H / (2 * fy)); + + // Calculate spacing between pictures + double k_pan = (pan_max_ - pan_min_) / std::ceil((pan_max_ - pan_min_) / (h_fov * (1 - overlap_))); + double k_tilt = (tilt_max_ - tilt_min_) / std::ceil((tilt_max_ - tilt_min_) / (v_fov * (1 - overlap_))); + + // Case where max and min is zero + if (std::isnan(k_pan)) k_pan = PI; + if (std::isnan(k_tilt)) k_tilt = PI; + + // If it's a full 360, skip the last one + if (pan_max_ - pan_min_ >= 2*PI) pan_max_-= 2 * EPS; // Go through all the points + + // Generate all the pan/tilt values + for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { + for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + if (tilt == -PI/2 || tilt == PI/2) + break; + } } - } - - // Go through all the panorama points - for (int i = 0; i < points_panorama.poses.size(); ++i) { - // Transform the points from 
the camera reference frame to the robot body - tf2::Transform panorama_pose; - panorama_pose.setOrigin(tf2::Vector3( - points_panorama.poses[i].position.x, - points_panorama.poses[i].position.y, - points_panorama.poses[i].position.z)); - panorama_pose.setRotation(tf2::Quaternion( - points_panorama.poses[i].orientation.x, - points_panorama.poses[i].orientation.y, - points_panorama.poses[i].orientation.z, - points_panorama.poses[i].orientation.w)); - TransformList(panorama_relative, panorama_transformed, panorama_pose); - - panorama_survey.poses.insert(std::end(panorama_survey.poses), std::begin(panorama_transformed.poses), - std::end(panorama_transformed.poses)); + // Go through all the panorama points + for (int i = 0; i < points_panorama.poses.size(); ++i) { + // Transform the points from the camera reference frame to the robot body + + tf2::Transform panorama_pose; + panorama_pose.setOrigin(tf2::Vector3( + points_panorama.poses[i].position.x, + points_panorama.poses[i].position.y, + points_panorama.poses[i].position.z)); + panorama_pose.setRotation(tf2::Quaternion( + points_panorama.poses[i].orientation.x, + points_panorama.poses[i].orientation.y, + points_panorama.poses[i].orientation.z, + points_panorama.poses[i].orientation.w)); + TransformList(panorama_relative, panorama_transformed, panorama_pose); + + panorama_survey.poses.insert(std::end(panorama_survey.poses), std::begin(panorama_transformed.poses), + std::end(panorama_transformed.poses)); + } + points_panorama = panorama_survey; } - points_panorama = panorama_survey; -} } // namespace inspection diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc old mode 100755 new mode 100644 index 3aaee48d..92bd993b --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -325,7 +325,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { max_motion_retry_number_= 
cfg_.Get("max_motion_retry_number"); // Initiate inspection library - inspection_ = new Inspection(nh, cfg_); + inspection_ = new Inspection(nh, &cfg_); } // Ensure all clients are connected @@ -374,11 +374,6 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { bool validation = cfg_.Get("enable_validation"); bool boostrapping = cfg_.Get("enable_bootstrapping"); bool immediate = cfg_.Get("enable_immediate"); - bool timesync = cfg_.Get("enable_timesync"); - double desired_vel = cfg_.Get("desired_vel"); - double desired_accel = cfg_.Get("desired_accel"); - double desired_omega = cfg_.Get("desired_omega"); - double desired_alpha = cfg_.Get("desired_alpha"); // Reconfigure the choreographer ff_util::ConfigClient choreographer_cfg(GetPlatformHandle(), NODE_CHOREOGRAPHER); @@ -398,11 +393,6 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { choreographer_cfg.Set("enable_validation", validation); choreographer_cfg.Set("enable_bootstrapping", boostrapping); choreographer_cfg.Set("enable_immediate", immediate); - choreographer_cfg.Set("enable_timesync", timesync); - choreographer_cfg.Set("desired_vel", desired_vel); - choreographer_cfg.Set("desired_accel", desired_accel); - choreographer_cfg.Set("desired_omega", desired_omega); - choreographer_cfg.Set("desired_alpha", desired_alpha); if (!choreographer_cfg.Reconfigure()) { NODELET_ERROR_STREAM("Failed to reconfigure choreographer"); return false; @@ -530,37 +520,32 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Signal an imminent sci cam image sci_cam_req_ = true; - // If using the simulation - if (sim_mode_) { - // Take picture - ff_msgs::CommandArg arg; - std::vector cmd_args; + // Take picture + ff_msgs::CommandArg arg; + std::vector cmd_args; - // The command sends two strings. The first has the app name, - // and the second the command value, encoded as json. + // The command sends two strings. The first has the app name, + // and the second the command value, encoded as json. 
- arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; - arg.s = "gov.nasa.arc.irg.astrobee.sci_cam_image"; - cmd_args.push_back(arg); + arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; + arg.s = "gov.nasa.arc.irg.astrobee.sci_cam_image"; + cmd_args.push_back(arg); - arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; - arg.s = "{\"name\": \"takeSinglePicture\"}"; - cmd_args.push_back(arg); + arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; + arg.s = "{\"name\": \"takePicture\"}"; + cmd_args.push_back(arg); - ff_msgs::CommandStamped cmd; - cmd.header.stamp = ros::Time::now(); - cmd.cmd_name = ff_msgs::CommandConstants::CMD_NAME_CUSTOM_GUEST_SCIENCE; - cmd.cmd_id = "inspection" + std::to_string(ros::Time::now().toSec()); - cmd.cmd_src = "guest science"; - cmd.cmd_origin = "guest science"; - cmd.args = cmd_args; + ff_msgs::CommandStamped cmd; + cmd.header.stamp = ros::Time::now(); + cmd.cmd_name = ff_msgs::CommandConstants::CMD_NAME_CUSTOM_GUEST_SCIENCE; + cmd.cmd_id = "inspection" + std::to_string(ros::Time::now().toSec()); + cmd.cmd_src = "guest science"; + cmd.cmd_origin = "guest science"; + cmd.args = cmd_args; - pub_guest_sci_.publish(cmd); - } else { - // If using the real robot - FILE *f = popen("ssh -t -t llp /opt/astrobee/ops/sci_cam_img_control.bash single pub_ros", "r"); - } + pub_guest_sci_.publish(cmd); + // Timer for the sci cam camera ros::Timer sci_cam_timeout_ = nh_->createTimer(ros::Duration(5), &InspectionNode::SciCamTimeout, this, true, false); @@ -737,8 +722,10 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Callback to handle reconfiguration requests bool ReconfigureCallback(dynamic_reconfigure::Config & config) { - if ( fsm_.GetState() == STATE::WAITING) - return cfg_.Reconfigure(config); + if (cfg_.Reconfigure(config)) { + inspection_->ReadParam(); + return true; + } return false; } diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index 
79baf25c..ac5ed55e 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -30,6 +30,8 @@ #include #include #include +#include + // Action #include @@ -63,12 +65,21 @@ DEFINE_bool(geometry, false, "Send the inspection command"); DEFINE_bool(panorama, false, "Send the inspection command"); DEFINE_bool(volumetric, false, "Send the inspection command"); +// Configurable Parameters +DEFINE_string(camera, "sci_cam", "Camera to use"); +DEFINE_double(tilt_max, 90.0, "Panorama: maximum tilt"); +DEFINE_double(tilt_min, -90.0, "Panorama: minimum tilt"); +DEFINE_double(pan_max, 180.0, "Panorama: maximum pan"); +DEFINE_double(pan_min, -180.0, "Panorama: minimum pan"); +DEFINE_double(overlap, 0.5, "Panorama: overlap between images"); + +// Plan files DEFINE_string(anomaly_poses, "/resources/vent_jpm.txt", "Vent pose list to inspect"); DEFINE_string(geometry_poses, "/resources/survey_bay_6.txt", "Geometry poses list to map"); DEFINE_string(panorama_poses, "/resources/panorama_jpm.txt", "Panorama poses list to map"); DEFINE_string(volumetric_poses, "/resources/wifi_jpm.txt", "Wifi poses list to map"); -// Timeout values +// Timeout values for action DEFINE_double(connect, 10.0, "Action connect timeout"); DEFINE_double(active, 10.0, "Action active timeout"); DEFINE_double(response, 200.0, "Action response timeout"); @@ -87,7 +98,7 @@ bool has_only_whitespace_or_comments(const std::string & str) { void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { geometry_msgs::Pose pose; - goal.inspect_poses.header.frame_id = "world"; + goal.inspect_poses.header.frame_id = FLAGS_camera; // Read file geometry std::ifstream ifs((file).c_str()); if (!ifs.is_open()) { @@ -303,9 +314,19 @@ int main(int argc, char *argv[]) { std::placeholders::_1, std::placeholders::_2)); client.SetConnectedCallback(std::bind(ConnectedCallback, &client)); client.Create(&nh, ACTION_BEHAVIORS_INSPECTION); - // Print out a status 
message - std::cout << "\r " - << "\rState: CONNECTING" << std::flush; + // Configure inspection parameters + if (FLAGS_panorama) { + ff_util::ConfigClient cfg(&nh, NODE_INSPECTION); + cfg.Set("pan_min", FLAGS_pan_min); + cfg.Set("pan_max", FLAGS_pan_max); + cfg.Set("tilt_min", FLAGS_tilt_min); + cfg.Set("tilt_max", FLAGS_tilt_max); + cfg.Set("overlap", FLAGS_overlap); + if (!cfg.Reconfigure()) { + std::cout << "Could not reconfigure the inspection node " << std::endl; + ros::shutdown(); + } + } // Synchronous mode ros::spin(); // Finish commandline flags diff --git a/astrobee/simulation/gazebo/CMakeLists.txt b/astrobee/simulation/isaac_gazebo/CMakeLists.txt similarity index 100% rename from astrobee/simulation/gazebo/CMakeLists.txt rename to astrobee/simulation/isaac_gazebo/CMakeLists.txt diff --git a/astrobee/simulation/gazebo/launch/spawn_isaac.launch b/astrobee/simulation/isaac_gazebo/launch/spawn_isaac.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/spawn_isaac.launch rename to astrobee/simulation/isaac_gazebo/launch/spawn_isaac.launch diff --git a/astrobee/simulation/gazebo/launch/spawn_object.launch b/astrobee/simulation/isaac_gazebo/launch/spawn_object.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/spawn_object.launch rename to astrobee/simulation/isaac_gazebo/launch/spawn_object.launch diff --git a/astrobee/simulation/gazebo/launch/start_simulation.launch b/astrobee/simulation/isaac_gazebo/launch/start_simulation.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/start_simulation.launch rename to astrobee/simulation/isaac_gazebo/launch/start_simulation.launch diff --git a/astrobee/simulation/gazebo/package.xml b/astrobee/simulation/isaac_gazebo/package.xml similarity index 100% rename from astrobee/simulation/gazebo/package.xml rename to astrobee/simulation/isaac_gazebo/package.xml diff --git a/astrobee/simulation/gazebo/readme.md b/astrobee/simulation/isaac_gazebo/readme.md 
similarity index 100% rename from astrobee/simulation/gazebo/readme.md rename to astrobee/simulation/isaac_gazebo/readme.md diff --git a/astrobee/simulation/gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc diff --git 
a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc diff --git a/astrobee/simulation/gazebo/worlds/granite.world b/astrobee/simulation/isaac_gazebo/worlds/granite.world similarity index 100% rename from astrobee/simulation/gazebo/worlds/granite.world rename to astrobee/simulation/isaac_gazebo/worlds/granite.world diff --git a/astrobee/simulation/gazebo/worlds/iss.world b/astrobee/simulation/isaac_gazebo/worlds/iss.world similarity index 100% rename from astrobee/simulation/gazebo/worlds/iss.world rename to astrobee/simulation/isaac_gazebo/worlds/iss.world diff --git a/astrobee/simulation/gazebo/worlds/r2.world 
b/astrobee/simulation/isaac_gazebo/worlds/r2.world similarity index 100% rename from astrobee/simulation/gazebo/worlds/r2.world rename to astrobee/simulation/isaac_gazebo/worlds/r2.world diff --git a/cmake/catkin2Config.cmake b/cmake/catkin2Config.cmake index 3229a1d0..90bd6f89 100644 --- a/cmake/catkin2Config.cmake +++ b/cmake/catkin2Config.cmake @@ -1,15 +1,14 @@ -# Copyright (c) 2021, United States Government, as represented by the +# Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -28,6 +27,7 @@ # :outvar _INCLUDE_DIRS/_LIBRARY_DIRS/_LIBRARY: # contains the include dirs / library dirs / libraries of the searched component . +find_package(catkin REQUIRED) # defines catkin_DIR if(CATKIN_TOPLEVEL_FIND_PACKAGE OR NOT CATKIN_TOPLEVEL) set(catkin_EXTRAS_DIR "${catkin_DIR}") @@ -117,8 +117,8 @@ if(catkin2_FIND_COMPONENTS) # paths. This makes it impossible to cross compile unless we fix # the paths like so below. 
if (USE_CTC) - if (${component}_LIBRARIES) - set(temp_LIBRARIES) + if (${component}_LIBRARIES) + set(temp_LIBRARIES) foreach(library ${${component}_LIBRARIES}) string(REGEX REPLACE "^/usr/lib" "${ARM_CHROOT_DIR}/usr/lib" library ${library}) string(REPLACE "i386-linux-gnu" "arm-linux-gnueabihf" library ${library}) diff --git a/isaac/CMakeLists.txt b/isaac/CMakeLists.txt index 4065e6f0..07e40f6e 100644 --- a/isaac/CMakeLists.txt +++ b/isaac/CMakeLists.txt @@ -39,7 +39,7 @@ catkin_package( ############# install(DIRECTORY config/ DESTINATION config) -install(FILES scripts/env_wrapper.sh DESTINATION . ) +install(PROGRAMS scripts/env_wrapper.sh DESTINATION . ) install(PROGRAMS scripts/apk_print_version.sh scripts/print_version.sh scripts/cpu_print_version.sh diff --git a/isaac/config/behaviors/inspection.config b/isaac/config/behaviors/inspection.config index 6069747e..436035e6 100644 --- a/isaac/config/behaviors/inspection.config +++ b/isaac/config/behaviors/inspection.config @@ -112,14 +112,7 @@ parameters = { unit = "boolean", description = "Shift timestamps to start immediately?" },{ - id = "enable_timesync", - reconfigurable = false, - type = "boolean", - default = false, - unit = "boolean", - description = "Shift timestamps by DTU + (UTC % DTU) to enforce sync" - },{ - id = "enable_faceforward_vent", + id = "enable_faceforward_anomaly", reconfigurable = false, type = "boolean", default = true, @@ -132,42 +125,6 @@ parameters = { default = false, unit = "boolean", description = "Should planning be face-forward only?" 
- },{ - id = "desired_vel", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net linear velocity" - },{ - id = "desired_accel", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net linear acceleration" - },{ - id = "desired_omega", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net angular velocity" - },{ - id = "desired_alpha", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net angular acceleration" }, -- INSPECTION CONFIG PARAMETERS { @@ -261,25 +218,7 @@ parameters = { unit = "m", description = "Maximum distance between camera and target" },{ - id = "horizontal_fov", - reconfigurable = true, - type = "double", - default = 2.957216683909311, - min = 1.0, - max = 3.0, - unit = "rad", - description = "Camera horizontal field of view" - },{ - id = "aspect_ratio", - reconfigurable = true, - type = "double", - default = 1.0, - min = 1.0, - max = 3.0, - unit = "rad", - description = "Camera Aspect Ratio" - },{ - id = "vent_size_x", + id = "target_size_x", reconfigurable = true, type = "double", default = 0.5, @@ -288,7 +227,7 @@ parameters = { unit = "rad", description = "Size of vent in the x-axis if z is pointing forward" },{ - id = "vent_size_y", + id = "target_size_y", reconfigurable = true, type = "double", default = 0.5, @@ -305,6 +244,51 @@ parameters = { max = 20, unit = "integer", description = "Number of times the inspection will retry to reach a pose" + },{ + id = "pan_min", + reconfigurable = true, + type = "double", + default = -180, + min = -180, + max = 0, + unit = "degrees", + description = "Minimum robot pan angle" + },{ + id = "pan_max", + reconfigurable = true, + type = 
"double", + default = 180, + min = 0, + max = 180, + unit = "degrees", + description = "Maximum robot pan angle" + },{ + id = "tilt_min", + reconfigurable = true, + type = "double", + default = -90, + min = -90, + max = 0, + unit = "degrees", + description = "Minimum robot tilt angle" + },{ + id = "tilt_max", + reconfigurable = true, + type = "double", + default = 90, + min = 0, + max = 90, + unit = "degrees", + description = "Maximum robot tilt angle" + },{ + id = "overlap", + reconfigurable = true, + type = "double", + default = 0.5, + min = 0.0, + max = 1.0, + unit = "percentage", + description = "Overlap between consecutive images" }} \ No newline at end of file diff --git a/isaac/launch/robot/ILP.launch b/isaac/launch/robot/ILP.launch index ccd325d0..10deca97 100644 --- a/isaac/launch/robot/ILP.launch +++ b/isaac/launch/robot/ILP.launch @@ -97,16 +97,6 @@ - - - - - - - - - - From d9083f9328c87b0c63072c5e3e626d97b244a357 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Mon, 13 Dec 2021 14:06:29 -0800 Subject: [PATCH 10/40] Panorama surveys (#6) * reset bias at startup * adding tolerance +-Pi/2 tilt; adding correct survey name * soundsee panorama --- astrobee/behaviors/inspection/resources/panorama_jpm.txt | 2 -- .../inspection/resources/soundsee_backround_noise.txt | 2 ++ astrobee/behaviors/inspection/resources/soundsee_panorama.txt | 2 ++ astrobee/behaviors/inspection/src/inspection.cc | 2 +- isaac/launch/sim.launch | 3 +++ 5 files changed, 8 insertions(+), 3 deletions(-) delete mode 100644 astrobee/behaviors/inspection/resources/panorama_jpm.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_panorama.txt diff --git a/astrobee/behaviors/inspection/resources/panorama_jpm.txt b/astrobee/behaviors/inspection/resources/panorama_jpm.txt deleted file mode 100644 index c102286f..00000000 --- 
a/astrobee/behaviors/inspection/resources/panorama_jpm.txt +++ /dev/null @@ -1,2 +0,0 @@ -# Panorama -10.5 -8 5.0 0 0 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt new file mode 100644 index 00000000..d3dd47a1 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_panorama.txt b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt new file mode 100644 index 00000000..46e20d03 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.0 0 0 180 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc index eb74c9a0..93678730 100644 --- a/astrobee/behaviors/inspection/src/inspection.cc +++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -563,7 +563,7 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, point.orientation.z = panorama_rotation.z(); point.orientation.w = panorama_rotation.w(); panorama_relative.poses.push_back(point); - if (tilt == -PI/2 || tilt == PI/2) + if ((tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) break; } } diff --git a/isaac/launch/sim.launch b/isaac/launch/sim.launch index ae1557c4..a9b9b42c 100644 --- a/isaac/launch/sim.launch +++ b/isaac/launch/sim.launch @@ -287,4 +287,7 @@ + + + From f7ec3dc7aa8fa7a09645e7f9d151b23a5b3c1e4b Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Tue, 14 Dec 2021 13:23:13 -0800 Subject: [PATCH 11/40] support texturing haz cam and color images everywhere in the geometry mapper (#8) * dense_map: Minor harderning of tools and documentation improvements * dense_map: Minor harderning of tools and 
documentation improvements * First cut at improved refiner * Make lines shorter in clang * More refiner work * Impement depth error * Implement triangulation * Complete setting up the problem * refiner2 add depth for ref cam * Match features using multiple threads * Match features using multiple threads * Refiner: Finish setting up the opt problem * Fix a nasty bug with precision loss in large timestamps * Minor tweak * Add tool to colorize bag images, important for testing support for color * Camera refiner2: Improved robustness * Bugfix for reading an image from a bag, copy to permanent storage * Geometry mapper: Support color throughout * clang-format: Make lines at most 100 characters, easier to manage shorter lines * Support the haz cam texturing in the geometry mapper * dense_map: Spellcheck and style * Fix python style Fix python style * .clang-format: Make it back 120 lines --- dense_map/geometry_mapper/CMakeLists.txt | 8 + .../include/dense_map_ros_utils.h | 7 + .../geometry_mapper/include/dense_map_utils.h | 14 +- dense_map/geometry_mapper/readme.md | 27 +- .../src/dense_map_ros_utils.cc | 108 +- .../geometry_mapper/src/dense_map_utils.cc | 34 +- .../geometry_mapper/src/interest_point.cc | 23 +- .../geometry_mapper/tools/camera_refiner.cc | 216 +- .../geometry_mapper/tools/camera_refiner2.cc | 2151 +++++++++++++++++ .../tools/colorize_bag_images.cc | 153 ++ .../tools/extract_pc_from_bag.cc | 11 +- .../geometry_mapper/tools/geometry_mapper.cc | 1093 +++++---- .../geometry_mapper/tools/geometry_mapper.py | 24 +- .../geometry_mapper/tools/image_picker.cc | 4 +- 14 files changed, 3181 insertions(+), 692 deletions(-) create mode 100644 dense_map/geometry_mapper/tools/camera_refiner2.cc create mode 100644 dense_map/geometry_mapper/tools/colorize_bag_images.cc diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index adf10910..6823fdb6 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ 
b/dense_map/geometry_mapper/CMakeLists.txt @@ -175,6 +175,10 @@ add_executable(camera_refiner tools/camera_refiner.cc) target_link_libraries(camera_refiner geometry_mapper_lib gflags glog) +add_executable(camera_refiner2 tools/camera_refiner2.cc) +target_link_libraries(camera_refiner2 + geometry_mapper_lib gflags glog) + add_executable(extract_pc_from_bag tools/extract_pc_from_bag.cc) target_link_libraries(extract_pc_from_bag geometry_mapper_lib gflags glog) @@ -187,6 +191,10 @@ add_executable(process_bag tools/process_bag.cc) target_link_libraries(process_bag geometry_mapper_lib gflags glog) +add_executable(colorize_bag_images tools/colorize_bag_images.cc) +target_link_libraries(colorize_bag_images + geometry_mapper_lib gflags glog) + add_executable(test_correct tools/test_correct.cc) target_link_libraries(test_correct geometry_mapper_lib gflags glog) diff --git a/dense_map/geometry_mapper/include/dense_map_ros_utils.h b/dense_map/geometry_mapper/include/dense_map_ros_utils.h index c647d94b..bd89a9c0 100644 --- a/dense_map/geometry_mapper/include/dense_map_ros_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_ros_utils.h @@ -50,6 +50,13 @@ void readBagPoses(std::string const& bag_file, std::string const& topic, Stamped void readBagImageTimestamps(std::string const& bag_file, std::string const& topic, std::vector& timestamps); +// Given a bag view, for each topic in the view read the vector of +// messages for that topic, sorted by message header timestamp. Only +// the following sensor types are supported: sensor_msgs::Image, +// sensor_msgs::CompressedImage, and sensor_msgs::PointCloud2. +void indexMessages(rosbag::View& view, // view can't be made const + std::map>& bag_map); + void readExifFromBag(std::vector const& bag_msgs, std::map>& exif); // Find an image at the given timestamp or right after it. 
We assume diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 080bf5ab..24b03d0c 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -41,7 +41,7 @@ namespace dense_map { -const int NUM_SCALE_PARAMS = 1; +const int NUM_SCALAR_PARAMS = 1; const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y const int NUM_RESIDUALS = 2; // Same as pixel size const int NUM_XYZ_PARAMS = 3; @@ -190,15 +190,21 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b // Must always have NUM_EXIF the last. enum ExifData { TIMESTAMP = 0, EXPOSURE_TIME, ISO, APERTURE, FOCAL_LENGTH, NUM_EXIF }; -// Triangulate rays emanating from given undistorted and centered pixels +// Triangulate two rays emanating from given undistorted and centered pixels Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, Eigen::Vector2d const& pix2); -// A debug utility for saving a camera in a format ASP understands. +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec); + +// A utility for saving a camera in a format ASP understands. // TODO(oalexan1): Expose the sci cam intrinsics rather than having // them hard-coded. 
-void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, +void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, + std::string const& output_dir, double curr_time, std::string const& suffix); } // namespace dense_map diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 15ca4a8d..84dfa173 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -287,17 +287,17 @@ robots using some established infrastructure. Alternatively, one can use `adb pull`. After this tool is used, the data can be manually deleted from HLP by first connecting to it with `adb shell`.) -In order to perform calibration, the sci cam data in the bag needs to +In order to run camera_calibrator, the sci cam data in the bag needs to be decompressed, resized to 1/4 of the resolution, and made to be grayscale. This is needed since the sci cam images are a little blurry and, at full resolution, the corners of the calibration target are hard to detect. The calibrator also does not handle the color format in the bag, hence the switch to grayscale images. -However, the both the geometry and the streaming mapper can handle -both color and grayscale images, and both at reduced resolution and -full-resolution. These tools can adjust for the fact that the -calibration was done at reduced resolution. +However, both the geometry and the streaming mapper, as well as +camera_refiner, can handle both color and grayscale images, and both +at reduced resolution and full-resolution. These tools can adjust for +the fact that the calibration was done at reduced resolution. To accomplish this processing, once the sci cam data is integrated into the bag, one can do the following: @@ -561,6 +561,8 @@ Nav cam images can be extracted from a bag as follows: -use_timestamp_as_image_name The last option, `-use_timestamp_as_image_name`, must not be missed. 
+It makes it easy to look up the image acquisition based on image name, +and this is used by the geometry mapper. Note that bags acquired on the ISS usually have the nav cam image topic as: @@ -1084,12 +1086,14 @@ sci cam image using the tool: export ASTROBEE_ROBOT=bsharp2 source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash - $ISAAC_WS/devel/lib/geometry_mapper/image_picker \ - --ros_bag mybag.bag \ - --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + $ISAAC_WS/devel/lib/geometry_mapper/image_picker \ + --ros_bag mybag.bag \ + --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + --sci_cam_topic /hw/cam_sci/compressed \ + --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \ --bracket_len 2.0 --output_nav_cam_dir nav_images -Setting up the correct bot name is very important. +Setting up the correct robot name above is very important. The --bracket_len option brackets sci cam images by nav cam images so the length of time between these two nav cam images is at most this @@ -1187,6 +1191,7 @@ and hence keeping its registration, as: $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/merge_maps \ --fix_first_map \ --num_image_overlaps_at_endpoints 200 \ + --min_valid_angle 1.0 \ registered_submap.map $surf_map \ --output_map merged.map @@ -1220,7 +1225,7 @@ point to that. --fix_map --skip_registration --float_scale \ --timestamp_interpolation --robust_threshold 3 \ --sci_cam_intrinsics_to_float \ - 'focal_length optical center distortion' \ + 'focal_length optical_center distortion' \ --mesh mesh.ply --mesh_weight 25 \ --mesh_robust_threshold 3 @@ -1302,7 +1307,7 @@ This program's options are: --sci_cam_intrinsics_to_float: Refine 0 or more of the following intrinsics for sci_cam: focal_length, optical_center, distortion. Specify as a quoted list. For example: - 'focal_length optical center distortion'. + 'focal_length optical_center distortion'. --timestamp_interpolation: If true, interpolate between timestamps. 
May give better results if the robot is known to move uniformly, and perhaps less so for stop-and-go robot motion. diff --git a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc index e9c31d98..23e74a3e 100644 --- a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc @@ -118,6 +118,53 @@ void readBagImageTimestamps(std::string const& bag_file, std::string const& topi } } +// Given a bag view, for each topic in the view read the vector of +// messages for that topic, sorted by message header timestamp. Only +// the following sensor types are supported: sensor_msgs::Image, +// sensor_msgs::CompressedImage, and sensor_msgs::PointCloud2. +void indexMessages(rosbag::View& view, // view can't be made const + std::map>& bag_map) { + bag_map.clear(); + + // First put the data in maps so that we can sort it + std::map> local_map; + for (rosbag::MessageInstance const m : view) { + // Check for regular image message + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + double curr_time = image_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + + // Check for compressed image message + sensor_msgs::CompressedImage::ConstPtr comp_image_msg = + m.instantiate(); + if (comp_image_msg) { + double curr_time = comp_image_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + + // Check for cloud + sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); + if (pc_msg) { + double curr_time = pc_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + } + + // Add the data in sorted order + for (auto topic_it = local_map.begin(); topic_it != local_map.end(); topic_it++) { + auto& topic_name = topic_it->first; // alias + auto& topic_map = topic_it->second; // alias + + for (auto 
msg_it = topic_map.begin(); msg_it != topic_map.end() ; msg_it++) + bag_map[topic_name].push_back(msg_it->second); + } +} + // Read exif data from a bag void readExifFromBag(std::vector const& bag_msgs, std::map >& exif) { @@ -142,8 +189,8 @@ void readExifFromBag(std::vector const& bag_msgs, // that during repeated calls to this function we always travel // forward in time, and we keep track of where we are in the bag using // the variable bag_pos that we update as we go. -bool lookupImage(double desired_time, std::vector const& bag_msgs, bool save_grayscale, - cv::Mat& image, int& bag_pos, double& found_time) { +bool lookupImage(double desired_time, std::vector const& bag_msgs, + bool save_grayscale, cv::Mat& image, int& bag_pos, double& found_time) { found_time = -1.0; // Record the time at which the image was found int num_msgs = bag_msgs.size(); double prev_image_time = -1.0; @@ -152,27 +199,35 @@ bool lookupImage(double desired_time, std::vector const bag_pos = local_pos; // save this for exporting // Check for uncompressed images - sensor_msgs::Image::ConstPtr image_msg = bag_msgs[local_pos].instantiate(); + sensor_msgs::Image::ConstPtr image_msg + = bag_msgs[local_pos].instantiate(); if (image_msg) { ros::Time stamp = image_msg->header.stamp; found_time = stamp.toSec(); // Sanity check: We must always travel forward in time - if (found_time < prev_image_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (found_time < prev_image_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. 
Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_image_time << ' ' << found_time << ".\n"; + continue; + } prev_image_time = found_time; if (found_time >= desired_time) { try { if (!save_grayscale) { - image = cv_bridge::toCvShare(image_msg, "bgr8")->image; + // Do a copy, as image_msg may soon run out of scope + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); } else { - // for some reason, looking up the image as grayscale does not work, + // For some reason, looking up the image as grayscale does not work, // so first it needs to be looked up as color and then converted. cv::Mat tmp_image = cv_bridge::toCvShare(image_msg, "bgr8")->image; cv::cvtColor(tmp_image, image, cv::COLOR_BGR2GRAY); } } catch (cv_bridge::Exception const& e) { - ROS_ERROR_STREAM("Unable to convert " << image_msg->encoding.c_str() << " image to bgr8."); + ROS_ERROR_STREAM("Unable to convert " << image_msg->encoding.c_str() + << " image to bgr8."); return false; } return true; @@ -187,7 +242,12 @@ bool lookupImage(double desired_time, std::vector const found_time = stamp.toSec(); // Sanity check: We must always travel forward in time - if (found_time < prev_image_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (found_time < prev_image_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_image_time << ' ' << found_time << ".\n"; + continue; + } prev_image_time = found_time; if (found_time >= desired_time) { @@ -209,14 +269,14 @@ bool lookupImage(double desired_time, std::vector const return false; } -// Find the closest depth cloud to given timestamp. Return an empty -// one if one cannot be found closer in time than max_time_diff. -// Store it as a cv::Mat of vec3f values. 
We assume that during -// repeated calls to this function we always travel forward in time, -// and we keep track of where we are in the bag using the variable -// bag_pos that we update as we go. -bool lookupCloud(double desired_time, std::vector const& bag_msgs, double max_time_diff, - cv::Mat& cloud, int& bag_pos, double& found_time) { +// Find the closest depth cloud to given timestamp (before or after +// it). Return an empty one if one cannot be found closer in time than +// max_time_diff. Store it as a cv::Mat of vec3f values. We assume +// that during repeated calls to this function we always travel +// forward in time, and we keep track of where we are in the bag using +// the variable bag_pos that we update as we go. +bool lookupCloud(double desired_time, std::vector const& bag_msgs, + double max_time_diff, cv::Mat& cloud, int& bag_pos, double& found_time) { int num_msgs = bag_msgs.size(); double prev_time = -1.0; found_time = -1.0; @@ -227,12 +287,18 @@ bool lookupCloud(double desired_time, std::vector const for (int local_pos = bag_pos; local_pos < num_msgs; local_pos++) { // Check for cloud - sensor_msgs::PointCloud2::ConstPtr curr_pc_msg = bag_msgs[local_pos].instantiate(); + sensor_msgs::PointCloud2::ConstPtr curr_pc_msg + = bag_msgs[local_pos].instantiate(); if (!curr_pc_msg) continue; double curr_time = curr_pc_msg->header.stamp.toSec(); // Sanity check: We must always travel forward in time - if (curr_time < prev_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (curr_time < prev_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. 
Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_time << ' ' << curr_time << ".\n"; + continue; + } // We are not yet at the stage where we can make decisions, so just keep on going if (curr_time <= desired_time) { @@ -261,14 +327,16 @@ bool lookupCloud(double desired_time, std::vector const // Decode the cloud pcl::PointCloud pc; pcl::fromROSMsg(*curr_pc_msg, pc); - if (static_cast(pc.points.size()) != static_cast(curr_pc_msg->width * curr_pc_msg->height)) + if (static_cast(pc.points.size()) != + static_cast(curr_pc_msg->width * curr_pc_msg->height)) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; cloud = cv::Mat::zeros(curr_pc_msg->height, curr_pc_msg->width, CV_32FC3); for (int row = 0; row < curr_pc_msg->height; row++) { for (int col = 0; col < curr_pc_msg->width; col++) { int count = row * curr_pc_msg->width + col; - cloud.at(row, col) = cv::Vec3f(pc.points[count].x, pc.points[count].y, pc.points[count].z); + cloud.at(row, col) + = cv::Vec3f(pc.points[count].x, pc.points[count].y, pc.points[count].z); } } diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index c330334d..29ff99d4 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -845,8 +845,10 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b } // Triangulate rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, + Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, + Eigen::Vector2d const& pix1, Eigen::Vector2d const& pix2) { Eigen::Matrix3d k1; k1 << focal_length1, 0, 0, 0, focal_length1, 
0, 0, 0, 1; @@ -866,6 +868,34 @@ Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eige return solution; } +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec) { + if (focal_length_vec.size() != world_to_cam_vec.size() || + focal_length_vec.size() != pix_vec.size()) + LOG(FATAL) << "All inputs to Triangulate() must have the same size."; + + if (focal_length_vec.size() <= 1) + LOG(FATAL) << "At least two rays must be passed to Triangulate()."; + + openMVG::Triangulation tri; + + for (size_t it = 0; it < focal_length_vec.size(); it++) { + Eigen::Matrix3d k; + k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p; + openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), + &cid_to_p); + + tri.add(cid_to_p, pix_vec[it]); + } + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + // A debug utility for saving a camera in a format ASP understands. // Need to expose the sci cam intrinsics. 
void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, diff --git a/dense_map/geometry_mapper/src/interest_point.cc b/dense_map/geometry_mapper/src/interest_point.cc index 12cd4155..7cef6c1c 100644 --- a/dense_map/geometry_mapper/src/interest_point.cc +++ b/dense_map/geometry_mapper/src/interest_point.cc @@ -89,7 +89,8 @@ void detectFeatures(const cv::Mat& image, bool verbose, // This really likes haz cam first and nav cam second void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_image_index, cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, - Eigen::Matrix2Xd const& left_keypoints, Eigen::Matrix2Xd const& right_keypoints, bool verbose, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, bool verbose, // output MATCH_PAIR* matches) { std::vector cv_matches; @@ -108,17 +109,17 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag if (left_vec.empty()) return; - // These may need some tweaking but work reasonably well. + // These may need some tweaking but works reasonably well. 
double ransacReprojThreshold = 20.0; cv::Mat inlier_mask; int maxIters = 10000; double confidence = 0.8; + // affine2D works better than homography // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, - // ransacReprojThreshold, - // inlier_mask, maxIters, confidence); - cv::Mat H = - cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, ransacReprojThreshold, maxIters, confidence); + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); std::vector left_ip, right_ip; for (size_t j = 0; j < cv_matches.size(); j++) { @@ -140,10 +141,16 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag right_ip.push_back(right); } - if (verbose) std::cout << "Number of matches: " << left_ip.size() << std::endl; - // Update the shared variable using a lock match_mutex->lock(); + + // Print the verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; + *matches = std::make_pair(left_ip, right_ip); match_mutex->unlock(); } diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 5dd504f3..56dc9d9e 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -17,12 +17,9 @@ * under the License. */ -// TODO(oalexan1): Must not use haz cam frames outside the bracket. -// TODO(oalexan1): In the refiner, get a max bracket size. // TODO(oalexan1): Consider adding a haz cam to haz cam // reprojection error in the camera refiner. There will be more // haz to haz matches than haz to nav or haz to sci. 
- #include #include #include @@ -144,7 +141,7 @@ DEFINE_bool(float_scale, false, DEFINE_string(sci_cam_intrinsics_to_float, "", "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical center'."); + "For example: 'focal_length optical_center'."); DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, std::numeric_limits::quiet_NaN(), @@ -170,38 +167,8 @@ DEFINE_bool(verbose, false, namespace dense_map { - // TODO(oalexan1): Store separately matches which end up being - // squashed in a pid_cid_to_fid. - - // A class to encompass all known information about a camera - // This is work in progress and will replace some of the logic further down. - struct cameraImage { - // An index to look up the type of camera. This will equal - // the value ref_camera_type if and only if this is a reference camera. - int camera_type; - - // The timestamp for this camera (in floating point seconds since epoch) - double timestamp; - - // Indices to look up the left and right reference cameras bracketing this camera - // in time. The two indices will have same value if and only if this is a reference - // camera. - int left_ref_cam_index; - int right_ref_ca_index; - - // Indices to the reference camera timestamps bracketing this timestamp in time. - // The two indices will have the same value if and only if this is a reference - // camera. - int left_ref_timestamp_index; - int right_ref_timestamp_index; - - // Index used to look up the reference camera to given camera rigid transform. - // That transform is the identity if and only if this is a reference camera. - int ref_cam_to_cam_index; - - // The image for this camera, in grayscale - cv::Mat image; - }; +// TODO(oalexan1): Store separately matches which end up being +// squashed in a pid_cid_to_fid. 
ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { // Convert to lower-case @@ -230,7 +197,7 @@ struct DepthToHazError { camera::CameraParameters const& haz_cam_params) : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes), m_haz_cam_params(haz_cam_params) { // Sanity check. - if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALE_PARAMS) { + if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) { LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n"; } } @@ -371,7 +338,7 @@ struct DepthToNavError { // Sanity check. if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_SCALE_PARAMS) { + m_block_sizes[4] != NUM_SCALAR_PARAMS) { LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; } } @@ -475,7 +442,7 @@ struct DepthToSciError { // Sanity check. 
if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALE_PARAMS || + m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; @@ -800,27 +767,28 @@ void calc_median_residuals(std::vector const& residuals, } // Prevent the linter from messing up with the beautiful formatting below - void add_haz_nav_cost(// Inputs // NOLINT - int haz_it, int nav_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & haz_cam_intensity_timestamps, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::map const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map const & haz_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - std::vector const & depth_to_nav_block_sizes, // NOLINT - std::vector const & depth_to_haz_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & hazcam_depth_to_image_vec, // NOLINT - ceres::Problem & problem) { // NOLINT + void add_haz_nav_cost // NOLINT + (// Inputs // NOLINT + int haz_it, int nav_it, int nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, 
// NOLINT + std::vector const & haz_cam_intensity_timestamps, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::map const & haz_cam_to_left_nav_cam_index, // NOLINT + std::map const & haz_cam_to_right_nav_cam_index, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & haz_cam_params, // NOLINT + std::vector const & depth_to_nav_block_sizes, // NOLINT + std::vector const & depth_to_haz_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + // Outputs // NOLINT + std::vector & residual_names, // NOLINT + double & hazcam_depth_to_image_scale, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & hazcam_depth_to_image_vec, // NOLINT + ceres::Problem & problem) { // NOLINT // Figure out the two nav cam indices bounding the current haz cam // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset. @@ -917,7 +885,7 @@ void calc_median_residuals(std::vector const& residuals, residual_names.push_back("haznavhaz1"); residual_names.push_back("haznavhaz2"); - // Ensure that the the depth points projects well in the nav cam interest point + // Ensure that the depth points projects well in the nav cam interest point ceres::CostFunction* depth_to_nav_cost_function = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left, depth_to_nav_block_sizes, nav_cam_params); @@ -1079,7 +1047,7 @@ void calc_median_residuals(std::vector const& residuals, residual_names.push_back("hazscihaz1"); residual_names.push_back("hazscihaz2"); - // Ensure that the the depth points projects well in the sci cam interest point. + // Ensure that the depth points projects well in the sci cam interest point. 
// Note how we pass a distorted sci cam pix, as in that error function we will // take the difference of distorted pixels. ceres::CostFunction* depth_to_sci_cost_function @@ -1107,38 +1075,39 @@ void calc_median_residuals(std::vector const& residuals, } // Prevent the linter from messing up with the beautiful formatting below - void add_nav_sci_cost(// Inputs // NOLINT - int nav_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::vector const & sci_cam_timestamps, // NOLINT - std::map const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map const & sci_cam_to_right_nav_cam_index, // NOLINT - Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT - Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - std::vector const & nav_block_sizes, // NOLINT - std::vector const & sci_block_sizes, // NOLINT - std::vector const & mesh_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - mve::TriangleMesh::Ptr const & mesh, // NOLINT - std::shared_ptr const & bvh_tree, // NOLINT - // Outputs // NOLINT - int & nav_sci_xyz_count, // NOLINT - std::vector & residual_names, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & scicam_to_hazcam_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT - Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - std::vector & initial_nav_sci_xyz, // NOLINT - std::vector & nav_sci_xyz, // NOLINT - ceres::Problem & problem) { // NOLINT + void add_nav_sci_cost + (// Inputs // NOLINT + int nav_it, int sci_it, int 
nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + double scicam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::vector const & sci_cam_timestamps, // NOLINT + std::map const & sci_cam_to_left_nav_cam_index, // NOLINT + std::map const & sci_cam_to_right_nav_cam_index, // NOLINT + Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT + Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & sci_cam_params, // NOLINT + std::vector const & nav_block_sizes, // NOLINT + std::vector const & sci_block_sizes, // NOLINT + std::vector const & mesh_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + mve::TriangleMesh::Ptr const & mesh, // NOLINT + std::shared_ptr const & bvh_tree, // NOLINT + // Outputs // NOLINT + int & nav_sci_xyz_count, // NOLINT + std::vector & residual_names, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & scicam_to_hazcam_vec, // NOLINT + Eigen::Vector2d & sci_cam_focal_vector, // NOLINT + Eigen::Vector2d & sci_cam_optical_center, // NOLINT + Eigen::VectorXd & sci_cam_distortion, // NOLINT + std::vector & initial_nav_sci_xyz, // NOLINT + std::vector & nav_sci_xyz, // NOLINT + ceres::Problem & problem) { // NOLINT auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it); auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it); if (left_it == sci_cam_to_left_nav_cam_index.end() || @@ -1295,8 +1264,33 @@ void calc_median_residuals(std::vector const& residuals, return; } - // TODO(oalexan1): This selects haz cam images outside of bracket. - // TODO(oalexan1): No check for bracket size either. 
+ void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. + cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; + } + void select_images_to_match(// Inputs // NOLINT double haz_cam_start_time, // NOLINT double navcam_to_hazcam_timestamp_offset, // NOLINT @@ -1332,8 +1326,8 @@ void calc_median_residuals(std::vector const& residuals, double navcam_to_scicam_timestamp_offset = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - // Use these to keep track where in the bags we are. After one traversal forward - // in time they need to be reset. + // Use these to keep track where in the bags we are. After one + // traversal forward in time they need to be reset. 
int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0; for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) { @@ -1355,9 +1349,8 @@ void calc_median_residuals(std::vector const& residuals, if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs, save_grayscale, images.back(), nav_cam_pos, found_time)) { - std::cout.precision(17); - std::cout << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << std::endl; - LOG(FATAL) << "Cannot look up nav cam image at given time"; + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n"; } cid_to_image_type.push_back(dense_map::NAV_CAM); @@ -1445,15 +1438,15 @@ void calc_median_residuals(std::vector const& residuals, << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - // Read an image at 25% resolution + // Read the sci cam image, and perhaps adjust its size images.push_back(cv::Mat()); cv::Mat local_img; if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs, save_grayscale, local_img, sci_cam_pos, found_time)) - LOG(FATAL) << "Cannot look up sci cam image at given time"; - - cv::resize(local_img, images.back(), cv::Size(), 0.25, 0.25, cv::INTER_AREA); + LOG(FATAL) << "Cannot look up sci cam image at given time."; + adjustImageSize(sci_cam_params, local_img); + local_img.copyTo(images.back()); // Sanity check Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize(); @@ -1498,7 +1491,7 @@ void calc_median_residuals(std::vector const& residuals, // Set up the variable blocks to optimize for DepthToHazError depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); // Set up the variable blocks to 
optimize for NavError nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); @@ -1509,7 +1502,7 @@ void calc_median_residuals(std::vector const& residuals, depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); // Set up the variable blocks to optimize for DepthToSciError depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); @@ -1517,7 +1510,7 @@ void calc_median_residuals(std::vector const& residuals, depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center depth_to_sci_block_sizes.push_back(num_scicam_distortions); // distortion @@ -1765,11 +1758,11 @@ int main(int argc, char** argv) { #if 0 std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; #endif + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; // Convert 
hazcam_to_navcam_trans to Affine3d Eigen::Affine3d hazcam_to_navcam_aff_trans; @@ -2194,7 +2187,8 @@ int main(int argc, char** argv) { ceres::Solve(options, &problem, &summary); // Copy back the optimized intrinsics - sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], sci_cam_focal_vector[0])); + sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], + sci_cam_focal_vector[0])); sci_cam_params.SetOpticalOffset(sci_cam_optical_center); sci_cam_params.SetDistortion(sci_cam_distortion); diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc new file mode 100644 index 00000000..a419be0d --- /dev/null +++ b/dense_map/geometry_mapper/tools/camera_refiner2.cc @@ -0,0 +1,2151 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +// TODO(oalexan1): Consider adding a haz cam to haz cam +// reprojection error in the camera refiner. There will be more +// haz to haz matches than haz to nav or haz to sci. +// TODO(oalexan1): Must document that the cloud timestamp +// that is looked up is what is closest to image timestamp. +// TODO(oalexan1): What if the wrong cloud is looked up +// for given image? 
Or if the delay is too big? + +// TODO(oalexan1): Must test the DepthError with no bracketing. + +// TODO(oalexan1): Move this to utils +// Get rid of warning beyond our control +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic push +#include +#include +#include +#include +#include +#pragma GCC diagnostic pop + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and " + "full-resolution sci_cam data."); + +DEFINE_string(sparse_map, "", + "A registered SURF sparse map made with some of the ROS bag data, " + "and including nav cam images closely bracketing the sci cam images."); + +DEFINE_string(output_map, "", "Output file containing the updated map."); + +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); + +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); + +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); + +DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); + +DEFINE_int32(num_overlaps, 20, "How many images (of all camera types) close and forward in " + "time to match to given image."); + +DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, + "Use depth haz cam clouds that are within this distance in " + "time from the nearest haz cam intensity image."); + +DEFINE_double(robust_threshold, 3.0, + 
"Pixel errors much larger than this will be exponentially attenuated " + "to affect less the cost function."); + +DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " + "less than this."); + +DEFINE_double(bracket_len, 2.0, + "Lookup sci and haz cam images only between consecutive nav cam images " + "whose distance in time is no more than this (in seconds), after adjusting " + "for the timestamp offset between these cameras. It is assumed the robot " + "moves slowly and uniformly during this time."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); + +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. Must be " + "a file with one timestamp per line."); + +DEFINE_bool(float_sparse_map, false, "Optimize the sparse map, hence only the camera params."); + +DEFINE_bool(float_scale, false, + "If to optimize the scale of the clouds (use it if the " + "sparse map is kept fixed), or else rescaling and registration is needed."); + +DEFINE_bool(float_timestamp_offsets, false, + "If to optimize the timestamp offsets among the cameras."); + +DEFINE_double(timestamp_offsets_max_change, 1.0, + "If floating the timestamp offsets, do not let them change by more than this " + "(measured in seconds). Existing image bracketing acts as an additional constraint."); + +DEFINE_string(nav_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for nav_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_string(haz_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for haz_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. 
" + "For example: 'focal_length optical_center'."); + +DEFINE_string(sci_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, + std::numeric_limits::quiet_NaN(), + "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "file with this value."); + +DEFINE_double(depth_weight, 10.0, + "The weight to give to depth measurements. Use a bigger number as " + "depth errors are usually small fractions of a meter."); + +DEFINE_string(mesh, "", + "Refine the sci cam so that the sci cam texture agrees with the nav cam " + "texture when projected on this mesh."); + +DEFINE_double(mesh_weight, 25.0, + "A larger value will give more weight to the mesh constraint. " + "Use a bigger number as depth errors are usually small fractions of a meter."); + +DEFINE_bool(verbose, false, + "Print the residuals and save the images and match files." 
+ "Stereo Pipeline's viewer can be used for visualizing these."); + +namespace dense_map { + + // Calculate interpolated world to camera trans + Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, + const double* end_world_to_ref_t, + const double* ref_to_cam_trans, + double beg_ref_stamp, + double end_ref_stamp, + double ref_to_cam_offset, + double cam_stamp) { + Eigen::Affine3d beg_world_to_ref_aff; + array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); + + Eigen::Affine3d end_world_to_ref_aff; + array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); + + Eigen::Affine3d ref_to_cam_aff; + array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); + + // std::cout.precision(18); + // std::cout << "--beg stamp " << beg_ref_stamp << std::endl; + // std::cout << "--end stamp " << end_ref_stamp << std::endl; + // std::cout << "cam stamp " << cam_stamp << std::endl; + // std::cout.precision(18); + // std::cout << "ref to cam off " << ref_to_cam_offset << std::endl; + // std::cout << "--ref to cam trans\n" << ref_to_cam_aff.matrix() << std::endl; + + // Covert from cam time to ref time and normalize. It is very + // important that below we subtract the big numbers from each + // other first, which are the timestamps, then subtract whatever + // else is necessary. Otherwise we get problems with numerical + // precision with CERES. 
+ double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) + / (end_ref_stamp - beg_ref_stamp); + + if (beg_ref_stamp == end_ref_stamp) + alpha = 0.0; // handle division by zero + + // std::cout << "--alpha " << alpha << std::endl; + if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; + + // Interpolate at desired time + Eigen::Affine3d interp_world_to_ref_aff + = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); + + Eigen::Affine3d interp_world_to_cam_afff = ref_to_cam_aff * interp_world_to_ref_aff; + + // std::cout << "final trans\n" << interp_world_to_cam_afff.matrix() << std::endl; + + return interp_world_to_cam_afff; + } + + // TODO(oalexan1): Store separately matches which end up being + // squashed in a pid_cid_to_fid. + +ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { + // Convert to lower-case + std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower); + + ceres::LossFunction* loss_function = NULL; + if (cost_fun == "l2") + loss_function = NULL; + else if (cost_fun == "huber") + loss_function = new ceres::HuberLoss(th); + else if (cost_fun == "cauchy") + loss_function = new ceres::CauchyLoss(th); + else if (cost_fun == "l1") + loss_function = new ceres::SoftLOneLoss(th); + else + LOG(FATAL) << "Unknown cost function: " + cost_fun; + + return loss_function; +} + +// An error function minimizing the error of projecting +// an xyz point into a camera that is bracketed by +// two reference cameras. The precise timestamp offset +// between them is also floated. 
+struct BracketedCamError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + BracketedCamError(Eigen::Vector2d const& meas_dist_pix, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || + m_block_sizes[7] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; + } + + // Set the correct distortion size. This cannot be done in the interface for now. + // TODO(oalexan1): Make individual block sizes for each camera, then this issue will go away. + m_block_sizes[7] = m_cam_params.GetDistortion().size(); + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+ bool operator()(double const* const* parameters, double* residuals) const { + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[4][0], // ref_to_cam_offset + m_cam_stamp); + + // World point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); + // std::cout << "--bracketX is " << X.transpose() << std::endl; + + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); + Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); + Eigen::VectorXd distortion(m_block_sizes[7]); + for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); + + // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; + // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; + // std::cout << "-dist " << distortion.transpose() << std::endl; + + // Convert world point to given cam coordinates + X = world_to_cam_trans * X; + + // std::cout << "--trans X " << X.transpose() << std::endl; + + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); + + // Compute the residuals + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; + + // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes, cam_params)); + + cost_function->SetNumResiduals(NUM_RESIDUALS); + + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 + cost_function->AddParameterBlock(block_sizes[i]); + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + + return cost_function; + } + + private: + Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp + std::vector m_block_sizes; + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class BracketedCamError + +// An error function minimizing the error of projecting an xyz point +// into a reference camera. Bracketing, timestamps, and transform to +// ref cam are not needed. 
+struct RefCamError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + RefCamError(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 5 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_XYZ_PARAMS || + m_block_sizes[2] != m_num_focal_lengths || + m_block_sizes[3] != NUM_OPT_CTR_PARAMS || + m_block_sizes[4] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; + } + + // Set the correct distortion size. This cannot be done in the interface for now. + + // TODO(oalexan1): Make individual block sizes for each camera, + // then this issue will go away. + m_block_sizes[4] = m_cam_params.GetDistortion().size(); + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + Eigen::Affine3d world_to_ref_t; + array_to_rigid_transform(world_to_ref_t, parameters[0]); + + // World point + Eigen::Vector3d X; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; + // std::cout << "--refX is " << X.transpose() << std::endl; + + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); + Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); + Eigen::VectorXd distortion(m_block_sizes[4]); + for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); + + // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; + // std::cout << "--opt ctr " << optical_center.transpose() << 
std::endl; + // std::cout << "-dist " << distortion.transpose() << std::endl; + + // Convert world point to given cam coordinates + X = world_to_ref_t * X; + + // std::cout << "--trans X " << X.transpose() << std::endl; + + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); + + // Compute the residuals + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; + + // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefCamError(meas_dist_pix, block_sizes, cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_RESIDUALS); + + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 + cost_function->AddParameterBlock(block_sizes[i]); + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + + return cost_function; + } + + private: + Eigen::Vector2d m_meas_dist_pix; + std::vector m_block_sizes; + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class RefCamError + +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point. The +// depth point needs to be transformed to world coordinates first. 
For +// that one has to do pose interpolation. +struct BracketedDepthError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 7 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_XYZ_PARAMS || + m_block_sizes[6] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[6][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + array_to_rigid_transform(depth_to_image, parameters[3]); + + // Apply the scale + double depth_to_image_scale = parameters[4][0]; + depth_to_image.linear() *= depth_to_image_scale; + // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; + + // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; + + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; + + // std::cout << "--image meas pt " << M.transpose() 
<< std::endl; + + // Convert to world coordinates + M = world_to_cam_trans.inverse() * M; + // std::cout << "--depth in world coords " << M.transpose() << std::endl; + + // Triangulated world point + Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); + // std::cout << "--triangulated X is " << X.transpose() << std::endl; + + // std::cout << "--weight " << m_weight << std::endl; + + // Compute the residuals + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + // std::cout << "--residual " << residuals[it] << std::endl; + } + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + for (size_t i = 0; i < block_sizes.size(); i++) + cost_function->AddParameterBlock(block_sizes[i]); + + return cost_function; + } + + private: + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp + std::vector m_block_sizes; +}; // End class BracketedDepthError + +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point for +// the ref camera. The depth point needs to be transformed to world +// coordinates first. 
+struct RefDepthError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 4 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_SCALAR_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS) { + LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Current world to camera transform + Eigen::Affine3d world_to_cam; + array_to_rigid_transform(world_to_cam, parameters[0]); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + array_to_rigid_transform(depth_to_image, parameters[1]); + + // Apply the scale + double depth_to_image_scale = parameters[2][0]; + depth_to_image.linear() *= depth_to_image_scale; + // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; + + // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; + + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; + + // std::cout << "--image meas pt " << M.transpose() << std::endl; + + // Convert to world coordinates + M = world_to_cam.inverse() * M; + // std::cout << "--depth in world coords " << M.transpose() << std::endl; + + // Triangulated world point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); + // std::cout << "--triangulated X is " << X.transpose() << std::endl; + + // std::cout << "--weight " << m_weight << std::endl; + + // Compute the residuals + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + // std::cout << 
"--residual " << residuals[it] << std::endl; + } + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefDepthError(weight, meas_depth_xyz, block_sizes)); + + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + for (size_t i = 0; i < block_sizes.size(); i++) + cost_function->AddParameterBlock(block_sizes[i]); + + return cost_function; + } + + private: + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + std::vector m_block_sizes; +}; // End class RefDepthError + +// An error function minimizing a weight times the distance from a +// variable xyz point to a fixed reference xyz point. +struct XYZError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + XYZError(Eigen::Vector3d const& ref_xyz, std::vector const& block_sizes, double weight) + : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) { + // Sanity check + if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS) + LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n"; + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. + // TODO(oalexan1): May want to use the analytical Ceres cost function + bool operator()(double const* const* parameters, double* residuals) const { + // Compute the residuals + for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz, + std::vector const& block_sizes, + double weight) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + + // The residual size is always the same + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector3d m_ref_xyz; // reference xyz + std::vector m_block_sizes; + double m_weight; +}; // End class XYZError + +enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; + +// Calculate the rmse residual for each residual type. +void calc_median_residuals(std::vector const& residuals, + std::vector const& residual_names, + std::string const& tag) { + size_t num = residuals.size(); + + if (num != residual_names.size()) + LOG(FATAL) << "There must be as many residuals as residual names."; + + std::map > stats; + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]] = std::vector(); // initialize + + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]].push_back(std::abs(residuals[it])); + + std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + for (auto it = stats.begin(); it != stats.end(); it++) { + std::string const& name = it->first; + std::vector vals = stats[name]; // make a copy + std::sort(vals.begin(), vals.end()); + + int len = vals.size(); + + int it1 = static_cast(0.25 * len); + int it2 = static_cast(0.50 * len); + int it3 = static_cast(0.75 * len); + + if (len == 0) + std::cout << name << ": " << "none"; + else + std::cout << std::setprecision(8) + << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3]; + std::cout << " (" << len << " residuals)" << std::endl; + } +} + + // TODO(oalexan1): This needs to be 
handled better. + void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. + cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; + } + + typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + + // A class to encompass all known information about a camera + // This is work in progress and will replace some of the logic further down. + struct cameraImage { + // An index to look up the type of camera. This will equal the + // value ref_camera_type if and only if this is a reference + // camera. + int camera_type; + + // The timestamp for this camera (in floating point seconds since epoch) + double timestamp; + + // The timestamp with an adjustment added to it to be in + // reference camera time + double ref_timestamp; + + // Indices to look up the reference cameras bracketing this camera + // in time. The two indices will have same value if and only if + // this is a reference camera. 
+ int beg_ref_index; + int end_ref_index; + + // The image for this camera, in grayscale + cv::Mat image; + + // The corresponding depth cloud, for an image + depth camera + cv::Mat depth_cloud; + }; + + // Sort by timestamps in the ref camera clock + bool timestampLess(cameraImage i, cameraImage j) { + return (i.ref_timestamp < j.ref_timestamp); + } + + // Find the haz cam depth measurement. Use nearest neighbor interpolation + // to look into the depth cloud. + bool depthValue( // Inputs + cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, + // Output + Eigen::Vector3d& depth_xyz) { + depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize + + if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud + + int col = round(dist_ip[0]); + int row = round(dist_ip[1]); + + if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) + LOG(FATAL) << "Out of range in depth cloud."; + + // After rounding one may hit the bound + if (col == depth_cloud.cols || row == depth_cloud.rows) + return false; + + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) + return false; + + depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + + return true; + } + + // Rebuild the map. + // TODO(oalexan1): This must be integrated in astrobee. 
+ void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data + camera::CameraParameters const& cam_params, + boost::shared_ptr sparse_map) { + std::string rebuild_detector = "SURF"; + std::cout << "Rebuilding map with " << rebuild_detector << " detector."; + + // Copy some data to make sure it does not get lost on resetting things below + std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; + std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; + + // Ensure the new camera parameters are set + sparse_map->SetCameraParameters(cam_params); + + std::cout << "Detecting features."; + sparse_map->DetectFeatures(); + + std::cout << "Matching features."; + // Borrow from the original map which images should be matched with which. + sparse_map->cid_to_cid_.clear(); + for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { + std::map const& track = pid_to_cid_fid[p]; // alias + for (std::map::const_iterator it1 = track.begin(); + it1 != track.end() ; it1++) { + for (std::map::const_iterator it2 = it1; + it2 != track.end() ; it2++) { + if (it1->first != it2->first) { + // Never match an image with itself + sparse_map->cid_to_cid_[it1->first].insert(it2->first); + } + } + } + } + + sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), + sparse_mapping::MatchesFile(map_file), sparse_map.get()); + for (size_t i = 0; i < world_to_ref_t.size(); i++) + sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); + + // Wipe file that is no longer needed + try { + std::remove(sparse_mapping::EssentialFile(map_file).c_str()); + }catch(...) {} + + std::cout << "Building tracks."; + bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff + sparse_mapping::BuildTracks(rm_invalid_xyz, + sparse_mapping::MatchesFile(map_file), + sparse_map.get()); + + // It is essential that during re-building we do not vary the + // cameras. 
Those were usually computed with a lot of SURF features, + // while rebuilding is usually done with many fewer ORGBRISK + // features. + bool fix_cameras = true; + if (fix_cameras) + std::cout << "Performing bundle adjustment with fixed cameras."; + else + std::cout << "Performing bundle adjustment while floating cameras."; + + sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); + } + + // TODO(oalexan1): Move this to utils. + // Intersect ray with mesh. Return true on success. + bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection) { + // Initialize the output + intersection = Eigen::Vector3d(0.0, 0.0, 0.0); + + // Ray from camera going through the pixel + Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], + undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); + cam_ray.normalize(); + + Eigen::Affine3d cam_to_world = world_to_cam.inverse(); + Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; + Eigen::Vector3d cam_ctr = cam_to_world.translation(); + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + + bvh_ray.tmin = min_ray_dist; + bvh_ray.tmax = max_ray_dist; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (bvh_tree->intersect(bvh_ray, &hit)) { + double cam_to_mesh_dist = hit.t; + intersection = cam_ctr + cam_to_mesh_dist * world_ray; + return true; + } + + return false; + } + +} // namespace dense_map + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + + std::cout.precision(17); // to be able to print timestamps + + if 
(FLAGS_ros_bag.empty()) + LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_sparse_map.empty()) + LOG(FATAL) << "The input sparse map was not specified."; + + if (FLAGS_output_map.empty()) + LOG(FATAL) << "The output sparse map was not specified."; + + if (FLAGS_robust_threshold <= 0.0) + LOG(FATAL) << "The robust threshold must be positive.\n"; + + if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; + + if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; + + if (FLAGS_timestamp_offsets_max_change < 0) + LOG(FATAL) << "The timestamp offsets must be non-negative."; + + // Read the config file + double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; + Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::Affine3d hazcam_depth_to_image_transform; + hazcam_depth_to_image_transform.setIdentity(); // default value + camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT + "scicam_to_hazcam_timestamp_offset", // NOLINT + "hazcam_to_navcam_transform", // NOLINT + "scicam_to_hazcam_transform", // NOLINT + "nav_cam_transform", // NOLINT + "hazcam_depth_to_image_transform", // NOLINT + navcam_to_hazcam_timestamp_offset, + scicam_to_hazcam_timestamp_offset, + hazcam_to_navcam_trans, scicam_to_hazcam_trans, + navcam_to_body_trans, hazcam_depth_to_image_transform, + 
nav_cam_params, haz_cam_params, + sci_cam_params); + + if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { + double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; + std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset + << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; + scicam_to_hazcam_timestamp_offset = new_val; + } + + if (FLAGS_mesh_weight <= 0.0) + LOG(FATAL) << "The mesh weight must be positive.\n"; + + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + + // If desired to process only specific timestamps + std::set sci_cam_timestamps_to_use; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps_to_use.insert(val); + } + +#if 0 + std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; + std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; + std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() + << "\n"; +#endif + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + + double hazcam_depth_to_image_scale + = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + + // Since we will keep the scale fixed, vary the part of the transform without + // the scale, while adding the scale each time before the transform is applied + Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; + hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; + + // Convert hazcam_to_navcam_trans to Affine3d + Eigen::Affine3d hazcam_to_navcam_aff_trans; + 
hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; + + // Convert scicam_to_hazcam_trans to Affine3d + Eigen::Affine3d scicam_to_hazcam_aff_trans; + scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; + + // Read the sparse map + boost::shared_ptr sparse_map = + boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + + // TODO(oalexan1): All this timestamp reading logic below should be in a function + + // Parse the ref timestamps from the sparse map + // We assume the sparse map image names are the timestamps. + std::vector ref_timestamps; + const std::vector& sparse_map_images = sparse_map->cid_to_filename_; + ref_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + ref_timestamps[cid] = timestamp; + } + if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; + + // Will optimize the nav cam poses as part of the process + std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias + + // Put transforms of the reference cameras in a vector. We will optimize them. + int num_ref_cams = world_to_ref_t.size(); + if (world_to_ref_t.size() != ref_timestamps.size()) + LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; + std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::rigid_transform_to_array(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // We assume our camera rig has n camera types. Each can be image or + // depth + image. Just one camera must be the reference camera. In + // this code it will be nav_cam. Every camera object (class + // cameraImage) knows its type (an index), which can be used to look + // up its intrinsics, image topic, depth topic (if present), + // ref_to_cam_timestamp_offset, and ref_to_cam_transform. 
A camera + // object also stores its image, depth cloud (if present), its + // timestamp, and indices pointing to its left and right ref + // bracketing cameras. + + // For every instance of a reference camera its + // ref_to_cam_timestamp_offset is 0 and kept fixed, + // ref_to_cam_transform is the identity and kept fixed, and the + // indices pointing to the left and right ref bracketing cameras are + // identical. + + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + + // Image and depth topics + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; + + std::vector> cam_timestamps_to_use = {std::set(), + std::set(), + sci_cam_timestamps_to_use}; + + // The timestamp offsets from ref cam to given cam + std::vector ref_to_cam_timestamp_offsets = + {0.0, + navcam_to_hazcam_timestamp_offset, + navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset}; + + for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { + std::cout << "Ref to cam offset for " << cam_names[it] << ' ' + << ref_to_cam_timestamp_offsets[it] << std::endl; + } + + std::vector cam_params = {nav_cam_params, + haz_cam_params, + sci_cam_params}; + + // Which intrinsics from which cameras to float + std::vector> intrinsics_to_float(num_cam_types); + dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); + dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); + + // The transform from ref to given cam + std::vector ref_to_cam_trans; + ref_to_cam_trans.push_back(Eigen::Affine3d::Identity()); 
+ ref_to_cam_trans.push_back(hazcam_to_navcam_aff_trans.inverse()); + ref_to_cam_trans.push_back(scicam_to_hazcam_aff_trans.inverse() + * hazcam_to_navcam_aff_trans.inverse()); + + // Put in arrays, so we can optimize them + std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + for (size_t it = 0; it < ref_to_cam_trans.size(); it++) { + std::cout << "Ref to cam transform for " << cam_names[it] << ":\n" + << ref_to_cam_trans[it].matrix() << std::endl; + } + + // Depth to image transforms and scales + std::vector depth_to_image_noscale; + std::vector depth_to_image_scales = {1.0, hazcam_depth_to_image_scale, 1.0}; + depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); + depth_to_image_noscale.push_back(hazcam_depth_to_image_noscale); + depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); + // Put in arrays, so we can optimize them + std::vector depth_to_image_noscale_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Put the intrinsics in arrays + std::vector focal_lengths(num_cam_types); + std::vector optical_centers(num_cam_types); + std::vector distortions(num_cam_types); + for (int it = 0; it < num_cam_types; it++) { + focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths + optical_centers[it] = cam_params[it].GetOpticalOffset(); + distortions[it] = cam_params[it].GetDistortion(); + } + + // Build a map for quick access for all the messages we may need + // TODO(oalexan1): Must the view be kept open for this to work? 
+ std::vector topics; + for (auto it = 0; it < image_topics.size(); it++) + if (image_topics[it] != "") topics.push_back(image_topics[it]); + for (auto it = 0; it < depth_topics.size(); it++) + if (depth_topics[it] != "") topics.push_back(depth_topics[it]); + rosbag::Bag bag; + bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery(topics)); + std::map> bag_map; + dense_map::indexMessages(view, bag_map); + + // A lot of care is needed here. This remembers how we travel in time + // for each camera type so we have fewer messages to search. + // But if a mistake is done below it will mess up this bookkeeping. + std::vector image_start_positions(num_cam_types, 0); + std::vector cloud_start_positions(num_cam_types, 0); + + // Cannot add a (positive) value more this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector upper_bound(num_cam_types, 1.0e+100); + + // Cannot add a (negative) value less than this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector lower_bound(num_cam_types, -1.0e+100); + + std::cout << "Bracketing the images in time." << std::endl; + + // Populate the data for each camera image + std::vector cams; + for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { + std::cout.precision(18); + + if (ref_cam_type != 0) + LOG(FATAL) << "It is assumed that the ref cam type is 0."; + + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + dense_map::cameraImage cam; + bool success = false; + if (cam_type == ref_cam_type) { + cam.camera_type = cam_type; + cam.timestamp = ref_timestamps[ref_it]; + cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it; + + // Search the whole set of timestamps, so set start_pos = + // 0. This is slower but more robust than keeping track of how + // we move in the increasing order of time. 
+ int start_pos = 0; + bool save_grayscale = true; // for matching we will need grayscale + double found_time = -1.0; + // this has to succeed since we picked the ref images in the map + if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, + // outputs + cam.image, image_start_positions[cam_type], // care here + found_time)) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // The exact time is expected + if (found_time != cam.timestamp) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) + continue; + + success = true; + + } else { + if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket + + // Convert the bracketing timestamps to current cam's time + double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_timestamp_offsets[cam_type]; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_timestamp_offsets[cam_type]; + + if (right_timestamp <= left_timestamp) + LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; + + if (right_timestamp - left_timestamp > FLAGS_bracket_len) + continue; // Must respect the bracket length + + // Find the image timestamp closest to the midpoint of the brackets. This will give + // more room to vary the timestamp later. + double mid_timestamp = (left_timestamp + right_timestamp)/2.0; + + // Search forward in time from image_start_positions[cam_type]. + // We will update that too later. One has to be very careful + // with it so it does not go too far forward in time + // so that at the next iteration we are passed what we + // search for. 
+ int start_pos = image_start_positions[cam_type]; // care here + bool save_grayscale = true; // for matching we will need grayscale + double curr_timestamp = left_timestamp; // start here + cv::Mat best_image; + double best_dist = 1.0e+100; + double best_time = -1.0, found_time = -1.0; + while (1) { + if (found_time >= right_timestamp) break; // out of range + + cv::Mat image; + if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], save_grayscale, + // outputs + image, + start_pos, // care here + found_time)) + break; // Need not succeed, but then there's no need to go on are we are at the end + + std::cout.precision(18); + + double curr_dist = std::abs(found_time - mid_timestamp); + if (curr_dist < best_dist) { + best_dist = curr_dist; + best_time = found_time; + // Update the start position for the future only if this is a good + // solution. Otherwise we may have moved too far. + image_start_positions[cam_type] = start_pos; + image.copyTo(best_image); + } + + // Go forward in time. We count on the fact that lookupImage() looks forward from given guess. + curr_timestamp = std::nextafter(found_time, 1.01 * found_time); + } + + if (best_time < 0.0) continue; // bracketing failed + + // Note how we allow best_time == left_timestamp if there's no other choice + if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck + + cam.camera_type = cam_type; + cam.timestamp = best_time; + cam.ref_timestamp = best_time - ref_to_cam_timestamp_offsets[cam_type]; + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it + 1; + cam.image = best_image; + + upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); + lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); + + // TODO(oalexan1): Wipe this temporary block + if (cam_type == 1) { + // Must compare raw big timestamps before and after! + // Must compare residuals! 
+ + double nav_start = ref_timestamps[ref_it]; + double haz_time = cam.ref_timestamp; + double nav_end = ref_timestamps[ref_it + 1]; + // std::cout << "--xxxhaz after " << haz_time - nav_start << ' ' << nav_end - haz_time << + // ' ' << nav_end - nav_start << std::endl; + } + if (cam_type == 2) { + double nav_start = ref_timestamps[ref_it]; + double sci_time = cam.ref_timestamp; + double nav_end = ref_timestamps[ref_it + 1]; + // std::cout << "--xxxsci after " << sci_time - nav_start << ' ' << nav_end - sci_time << + // ' ' << nav_end - nav_start << std::endl; + } + + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) + continue; + + success = true; + } + + if (!success) continue; + + if (depth_topics[cam_type] != "") { + double found_time = -1.0; + cv::Mat cloud; + // Look up the closest cloud in time (either before or after cam.timestamp) + // This need not succeed. + dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, + // Outputs + cam.depth_cloud, + cloud_start_positions[cam_type], // care here + found_time); + } + + cams.push_back(cam); + } // end loop over camera types + } // end loop over ref images + + std::cout << "Allowed timestamp offset range while respecting the bracket for given cameras:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + + // So far we had the relative change. Now add the actual offset to get the max allowed offset. 
+ lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; + } + + if (FLAGS_float_timestamp_offsets) { + std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + + lower_bound[cam_type] = std::max(lower_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + - FLAGS_timestamp_offsets_max_change); + + upper_bound[cam_type] = std::min(upper_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + + FLAGS_timestamp_offsets_max_change); + + // Tighten a bit to ensure we don't exceed things when we add + // and subtract timestamps later. Note that timestamps are + // measured in seconds and fractions of a second since epoch and + // can be quite large so precision loss can easily happen. + lower_bound[cam_type] += 1.0e-5; + upper_bound[cam_type] -= 1.0e-5; + + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; + } + } + + std::cout << "--Deal with adjustment!" 
<< std::endl; + for (size_t it = 0; it < cams.size(); it++) { + if (cams[it].camera_type == 2) { + dense_map::adjustImageSize(cam_params[2], cams[it].image); + } + } + + std::sort(cams.begin(), cams.end(), dense_map::timestampLess); + + if (FLAGS_verbose) { + int count = 10000; + std::vector image_files; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cams[it].camera_type << ".jpg"; + std::string name = oss.str(); + std::cout << "--writing " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } + } + + std::cout << "Detecting features." << std::endl; + + // Detect features using multiple threads + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(cams.size()); + cid_to_keypoint_map.resize(cams.size()); + ff_common::ThreadPool thread_pool1; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool1.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, + &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + } + thread_pool1.Join(); + + dense_map::MATCH_MAP matches; + + std::vector > image_pairs; + for (size_t it1 = 0; it1 < cams.size(); it1++) { + for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) { + image_pairs.push_back(std::make_pair(it1, it2)); + } + } + + std::cout << "Matching features." 
<< std::endl; + ff_common::ThreadPool thread_pool2; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + bool verbose = true; + thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, + left_image_it, right_image_it, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + verbose, &matches[pair]); + } + thread_pool2.Join(); + + // If feature A in image I matches feather B in image J, which matches feature C in image K, + // then (A, B, C) belong together into a track. Build such a track. + + std::vector, int>> keypoint_map(cams.size()); + + int num_total_matches = 0; // temporary + + // Give all interest points in a given image a unique id + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + keypoint_map[left_index][dist_left_ip] = 0; + keypoint_map[right_index][dist_right_ip] = 0; + num_total_matches++; + } + } + + std::cout << "--bbb num total matches " << num_total_matches << std::endl; + std::cout << "--why so many more matches than pid?" << std::endl; + std::cout << "--test adding missing pairs!" << std::endl; + std::cout << "--must do two passes!" 
<< std::endl; + + // Give all interest points in a given image a unique id + // And put them in a vector with the id corresponding to the interest point + std::vector>> keypoint_vec(cams.size()); + for (size_t cam_it = 0; cam_it < cams.size(); cam_it++) { + int count = 0; + for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { + ip_it->second = count; + count++; + // std::cout << "--value " << (ip_it->first).first << ' ' << (ip_it->first).second << ' ' + // << ip_it->second << std::endl; + keypoint_vec[cam_it].push_back(ip_it->first); + // std::cout << "--size is " << keypoint_vec[cam_it].size() << std::endl; + } + } + + std::cout << "--write my own function! It should just remove conflicts!" << std::endl; + + openMVG::matching::PairWiseMatches match_map; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + + std::vector mvg_matches; + + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + + // std::cout << "zzz1 " << left_index << ' ' << dist_left_ip.first << ' ' << + // dist_left_ip.second << ' ' << right_index << ' ' << dist_right_ip.first << ' ' << + // dist_right_ip.second << std::endl; + + int left_id = keypoint_map[left_index][dist_left_ip]; + int right_id = keypoint_map[right_index][dist_right_ip]; + mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); + } + match_map[index_pair] = mvg_matches; + } + + if (FLAGS_verbose) { + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = 
it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; + + int left_index = index_pair.first; + int right_index = index_pair.second; + + std::cout << "--indices " << left_index << ' ' << right_index << std::endl; + + std::ostringstream oss1; + oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; + + std::string left_stem = oss1.str(); + std::string left_image = left_stem + ".jpg"; + + std::ostringstream oss2; + oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; + + std::string right_stem = oss2.str(); + std::string right_image = right_stem + ".jpg"; + + std::string match_file = left_stem + "__" + right_stem + ".match"; + + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); + } + } + + std::cout << "Find other things which need deallocation!" << std::endl; + std::cout << "De-allocate images and point clouds when no longer needed!" << std::endl; + + // not needed anymore and take up a lot of RAM + matches.clear(); matches = dense_map::MATCH_MAP(); + keypoint_map.clear(); keypoint_map.shrink_to_fit(); + cid_to_descriptor_map.clear(); cid_to_descriptor_map.shrink_to_fit(); + cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); + + std::vector > pid_to_cid_fid; + { + // Build tracks + // De-allocate these as soon as not needed to save memory + openMVG::tracks::TracksBuilder trackBuilder; + trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences + trackBuilder.Filter(); // Filter: Remove tracks that have conflict + // trackBuilder.ExportToStream(std::cout); + // Export tracks as a map (each entry is a sequence of imageId and featureIndex): + // {TrackIndex => {(imageIndex, featureIndex), ... 
,(imageIndex, featureIndex)} + openMVG::tracks::STLMAPTracks map_tracks; + trackBuilder.ExportToSTL(map_tracks); + + // TODO(oalexan1): Print how many pairwise matches were there before + // and after filtering tracks. + + if (map_tracks.empty()) + LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; + + size_t num_elems = map_tracks.size(); + // Populate the filtered tracks + pid_to_cid_fid.clear(); + pid_to_cid_fid.resize(num_elems); + size_t curr_id = 0; + for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { + for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { + pid_to_cid_fid[curr_id][itr2->first] = itr2->second; + } + curr_id++; + } + } + + // The transform from every camera to the world + std::vector world_to_cam(cams.size()); + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + // std::cout << "--ref indices " << beg_index << ' ' << end_index << std::endl; + // std::cout << "--cam type " << cam_type << std::endl; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); + + // std::cout << "--trans for camera: " << it << ' ' << world_to_cam[it].matrix() << std::endl; + } + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // std::cout << std::endl; + // std::cout << "pid is " << pid << std::endl; + // std::cout << "---aaa pid size is " << pid_to_cid_fid[pid].size() << std::endl; + // std::cout << "zzz2 "; + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = 
cid_fid->second; + // std::cout << cid << ' ' << keypoint_vec[cid][fid].first << ' '; + // std::cout << keypoint_vec[cid][fid].second << " "; + } + // std::cout << std::endl; + } + + // Do multiview triangulation + std::vector xyz_vec(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num = 0; + + std::vector focal_length_vec; + std::vector world_to_cam_vec; + std::vector pix_vec; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } + + // Triangulate n rays emanating from given undistorted and centered pixels + Eigen::Vector3d joint_xyz = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); + + xyz_vec[pid] = joint_xyz; + + // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; + } + + std::cout << "--must do two passes!" << std::endl; + std::cout << "--must filter by min triangulation angle and points behind camera" << std::endl; + + // std::vector> cid_fid_to_pid; + // sparse_mapping::InitializeCidFidToPid(cams.size(), pid_to_cid_fid, &cid_fid_to_pid); + + std::cout << "---too many outliers!" << std::endl; + + std::cout << "--start selecting!" 
<< std::endl; + + // Set up the variable blocks to optimize for BracketedCamError + std::vector bracketed_cam_block_sizes; + int num_focal_lengths = 1; + int num_distortion_params = 1; // will be overwritten + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_cam_block_sizes.push_back(num_focal_lengths); + bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + std::cout << "--make bracketed block sizes individual!" << std::endl; + bracketed_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + + // Set up the variable blocks to optimize for RefCamError + std::vector ref_cam_block_sizes; + ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + ref_cam_block_sizes.push_back(num_focal_lengths); + ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + std::cout << "--make ref block sizes individual!" 
<< std::endl; + ref_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + + // Set up the variable blocks to optimize for BracketedDepthError + std::vector bracketed_depth_block_sizes; + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for RefDepthError + std::vector ref_depth_block_sizes; + ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + // Set up the variable blocks to optimize for the mesh xyz + std::vector mesh_block_sizes; + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + std::cout << "must test with the ref cam having depth!" << std::endl; + + // Form the problem + ceres::Problem problem; + std::vector residual_names; + std::vector residual_scales; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cam_type == ref_cam_type) { + // The cost function of projecting in the ref cam. 
+ ceres::CostFunction* ref_cost_function = + dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* ref_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // std::cout << "--add block" << std::endl; + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (ref_cost_function, ref_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &xyz_vec[pid][0], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; + + // The constraint that the depth points agree with triangulated points + ceres::CostFunction* ref_depth_cost_function + = dense_map::RefDepthError::Create(FLAGS_depth_weight, + depth_xyz, + ref_depth_block_sizes); + + ceres::LossFunction* ref_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + problem.AddResidualBlock + (ref_depth_cost_function, + ref_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + 
&depth_to_image_scales[cam_type], + &xyz_vec[pid][0]); + + if (!FLAGS_float_sparse_map) + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + if (!FLAGS_float_scale) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + + } else { + // Other cameras, which need bracketing + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // std::cout << "--add block" << std::endl; + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if 
(!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; + + // Ensure that the depth points agree with triangulated points + ceres::CostFunction* bracketed_depth_cost_function + = dense_map::BracketedDepthError::Create(FLAGS_depth_weight, + depth_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_block_sizes); + + ceres::LossFunction* bracketed_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + problem.AddResidualBlock + (bracketed_depth_cost_function, + bracketed_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + 
lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + } + } + } + + if (FLAGS_mesh != "") { + // Add the mesh constraint + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num = 0; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + bool have_mesh_intersection = false; + // Try to make the intersection point be on the mesh and the nav cam ray + // to make the sci cam to conform to that. + // TODO(oalexan1): Think more of the range of the ray below + double min_ray_dist = 0.0; + double max_ray_dist = 10.0; + Eigen::Vector3d intersection(0.0, 0.0, 0.0); + have_mesh_intersection + = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + min_ray_dist, max_ray_dist, + // Output + intersection); + + if (have_mesh_intersection) { + mesh_xyz += intersection; + num += 1; + } + } + + if (num >= 1) { + mesh_xyz /= num; + + // std::cout << "--num and mesh xyz is " << num << ' ' << mesh_xyz.transpose() << std::endl; + + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_weight); + + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); + + residual_names.push_back("mesh_x_m"); + residual_names.push_back("mesh_y_m"); + residual_names.push_back("mesh_z_m"); + + residual_scales.push_back(FLAGS_mesh_weight); + residual_scales.push_back(FLAGS_mesh_weight); + 
residual_scales.push_back(FLAGS_mesh_weight); + } + } + // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; + } + + // See which intrinsics from which cam to float or keep fixed + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (intrinsics_to_float[cam_type].find("focal_length") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; + } + if (intrinsics_to_float[cam_type].find("optical_center") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << std::endl; + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; + } + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating distortion." 
<< std::endl; + } + } + + // Evaluate the residual before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "before opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Copy the offsets to specific cameras + std::cout.precision(18); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + std::cout << "--offset before " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // Evaluate the residual after optimization + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must 
be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Copy back the reference transforms + std::cout.precision(18); + std::cout << "--sparse map before\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::array_to_rigid_transform(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + std::cout << "--sparse map after\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + + // Copy back the optimized intrinsics + for (int it = 0; it < num_cam_types; it++) { + cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); + cam_params[it].SetOpticalOffset(optical_centers[it]); + cam_params[it].SetDistortion(distortions[it]); + } + + // Copy back the optimized extrinsics + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy back the depth to image transforms without scales + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy extrinsics to specific cameras + hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + scicam_to_hazcam_aff_trans = hazcam_to_navcam_aff_trans.inverse() * ref_to_cam_trans[2].inverse(); + + // Copy intrinsics to 
the specific cameras + if (FLAGS_nav_cam_intrinsics_to_float != "") nav_cam_params = cam_params[0]; + if (FLAGS_haz_cam_intrinsics_to_float != "") haz_cam_params = cam_params[1]; + if (FLAGS_sci_cam_intrinsics_to_float != "") sci_cam_params = cam_params[2]; + + // Copy the offsets to specific cameras + std::cout.precision(18); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + std::cout << "--offset after " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; + + navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + scicam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1] + - ref_to_cam_timestamp_offsets[2]; + + std::cout.precision(18); + std::cout << "--navcam_to_hazcam_timestamp_offset " << navcam_to_hazcam_timestamp_offset << std::endl; + std::cout << "--scicam_to_hazcam_timestamp_offset " << scicam_to_hazcam_timestamp_offset << std::endl; + + // Update the optimized depth to image (for haz cam only) + int cam_type = 1; // haz cam + hazcam_depth_to_image_scale = depth_to_image_scales[cam_type]; + hazcam_depth_to_image_transform = depth_to_image_noscale[cam_type]; + hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + + // Update the config file + { + // update nav and haz + bool update_cam1 = true, update_cam2 = true; + std::string cam1_name = "nav_cam", cam2_name = "haz_cam"; + boost::shared_ptr + cam1_params(new camera::CameraParameters(nav_cam_params)); + boost::shared_ptr + cam2_params(new camera::CameraParameters(haz_cam_params)); + bool update_depth_to_image_transform = true; + bool update_extrinsics = true; + bool update_timestamp_offset = true; + std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; + // Modify in-place the robot config file + dense_map::update_config_file(update_cam1, cam1_name, cam1_params, + update_cam2, cam2_name, cam2_params, + update_depth_to_image_transform, + hazcam_depth_to_image_transform, update_extrinsics, + 
hazcam_to_navcam_aff_trans, update_timestamp_offset, + cam1_to_cam2_timestamp_offset_str, + navcam_to_hazcam_timestamp_offset); + } + { + // update sci and haz cams + // TODO(oalexan1): Write a single function to update all 3 of them + bool update_cam1 = true, update_cam2 = true; + std::string cam1_name = "sci_cam", cam2_name = "haz_cam"; + boost::shared_ptr + cam1_params(new camera::CameraParameters(sci_cam_params)); + boost::shared_ptr + cam2_params(new camera::CameraParameters(haz_cam_params)); + bool update_depth_to_image_transform = true; + bool update_extrinsics = true; + bool update_timestamp_offset = true; + std::string cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; + // Modify in-place the robot config file + dense_map::update_config_file(update_cam1, cam1_name, cam1_params, + update_cam2, cam2_name, cam2_params, + update_depth_to_image_transform, + hazcam_depth_to_image_transform, update_extrinsics, + scicam_to_hazcam_aff_trans.inverse(), + update_timestamp_offset, + cam1_to_cam2_timestamp_offset_str, + scicam_to_hazcam_timestamp_offset); + } + + std::cout << "Writing: " << FLAGS_output_map << std::endl; + if (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "") { + std::cout << "Either the sparse map intrinsics or cameras got modified. " + << "The map must be rebuilt." << std::endl; + + dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data + nav_cam_params, sparse_map); + } + + sparse_map->Save(FLAGS_output_map); + + return 0; +} // NOLINT + diff --git a/dense_map/geometry_mapper/tools/colorize_bag_images.cc b/dense_map/geometry_mapper/tools/colorize_bag_images.cc new file mode 100644 index 00000000..98965e77 --- /dev/null +++ b/dense_map/geometry_mapper/tools/colorize_bag_images.cc @@ -0,0 +1,153 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. 
+ * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +// A tool to make the bag images in color, for testing purposes. The +// chosen colors are rather arbitrary. If the images are already in +// color, their colors will be altered as one can't tell if the image +// read in is color or grayscale, since it will have 3 channels either +// way. Compressed images will be decompressed and won't be compressed +// back. 
+ +#include +#include +#include + +#include +#include + +#include + +#include +#include + +// ROS +#include +#include +#include +#include +#include + +#include +#include +#include + +DEFINE_string(input_bag, "", "The input bag."); + +DEFINE_string(output_bag, "", "The output bag."); + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + + if (FLAGS_input_bag == "" || FLAGS_output_bag == "") { + std::cout << "Not all inputs were specified.\n"; + return 1; + } + + int tmp_count = 10000; + + // Open the output bag + std::cout << "Opening the output bag file: " << FLAGS_output_bag << "\n"; + rosbag::Bag output_bag; + output_bag.open(FLAGS_output_bag, rosbag::bagmode::Write); + + // Get the topics from the input bag + std::vector topics; + dense_map::readTopicsInBag(FLAGS_input_bag, topics); + + rosbag::Bag input_bag; + std::cout << "Opening the input bag file: " << FLAGS_input_bag << "\n"; + input_bag.open(FLAGS_input_bag, rosbag::bagmode::Read); + + rosbag::View view(input_bag, rosbag::TopicQuery(topics)); + for (rosbag::MessageInstance const m : view) { + bool have_image = false; + cv::Mat image; + std_msgs::Header header; + + // Try to read a compressed image + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); + if (comp_image_msg) { + // Convert the compressed image data to cv::Mat + try { + image = cv::imdecode(cv::Mat(comp_image_msg->data), cv::IMREAD_COLOR); + } catch (cv_bridge::Exception const& e) { + LOG(ERROR) << "Unable to convert compressed image to bgr8.\n"; + continue; + } + + // Use the header from the input image, including the timestamp + header = comp_image_msg->header; + have_image = true; + } + + if (!have_image) { + // Try to read a regular uncompressed image + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + try { + // Copy from the temporary pointer to a stable location + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); + } catch 
(cv_bridge::Exception const& e) { + LOG(ERROR) << "Unable to convert image to bgr8.\n"; + continue; + } + + // Use the header from the input image, including the timestamp + header = image_msg->header; + have_image = true; + } + } + + if (!have_image) { + // Write the message without any changes + output_bag.write(m.getTopic(), m.getTime(), m); + continue; + } + + // Tweak the pixels to create color if the input is grayscale + for (int row = 0; row < image.rows; row++) { + for (int col = 0; col < image.cols; col++) { + cv::Vec3b color = image.at(row, col); + int delta = 60; + color[0] = std::max(static_cast(color[0]), delta) - delta; + color[2] = std::max(static_cast(color[2]), delta) - delta; + image.at(row, col) = color; + } + } + + // Form the output image + cv_bridge::CvImage cv_image; + cv_image.image = image; + cv_image.encoding = "bgr8"; + sensor_msgs::Image out_image_msg; + cv_image.toImageMsg(out_image_msg); + + // Use the header from the input image, including the timestamp + out_image_msg.header = header; + + // Write it to the bag to the same topic + output_bag.write(m.getTopic() + "_color", m.getTime(), out_image_msg); + } + + input_bag.close(); + output_bag.close(); + + return 0; +} diff --git a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc index 73c028c1..ab4bc5ca 100644 --- a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc +++ b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc @@ -118,10 +118,11 @@ int main(int argc, char** argv) { // Convert to an opencv image cv::Mat image; try { - image = cv_bridge::toCvShare(image_msg, "bgr8")->image; + // Do a copy as the message is temporary + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); } catch (cv_bridge::Exception const& e) { try { - image = cv_bridge::toCvShare(image_msg, "32FC1")->image; + (cv_bridge::toCvShare(image_msg, "32FC1")->image).copyTo(image); } catch (cv_bridge::Exception const& e) { LOG(ERROR) << 
"Unable to convert " << image_msg->encoding.c_str() << " image to bgr8 or 32FC1"; continue; @@ -133,13 +134,15 @@ int main(int argc, char** argv) { } // Extract a compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg = m.instantiate(); + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); if (comp_image_msg) { ros::Time stamp = comp_image_msg->header.stamp; curr_time = stamp.toSec(); // Set up the output filename - form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, sizeof(filename_buffer)); + form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, + sizeof(filename_buffer)); std::string name(filename_buffer); name = output_directory + "/" + name + ".jpg"; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 1f460476..0836a161 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -17,6 +17,8 @@ * under the License. 
*/ +// TODO(oalexan1): Modularize this code and move things to utils + #include #include #include @@ -178,7 +180,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, std::vector const& nav_cam_msgs, boost::shared_ptr sparse_map, bool use_brisk_map, - std::vector const& sparse_map_timestamps, Eigen::MatrixXd& desired_cam_to_world_trans, + std::vector const& sparse_map_timestamps, + Eigen::MatrixXd& desired_cam_to_world_trans, int& sparse_map_id, int& nav_cam_bag_id, int& nav_cam_pos) { // Find nav cam images around the nav cam pose to compute std::vector* nearby_cid_ptr = NULL; @@ -210,7 +213,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const cv::Mat beg_image; bool save_grayscale = true; // Localization needs grayscale images double found_time = -1.0; - if (!dense_map::lookupImage(beg_nav_cam_time, nav_cam_msgs, save_grayscale, beg_image, nav_cam_pos, found_time)) + if (!dense_map::lookupImage(beg_nav_cam_time, nav_cam_msgs, save_grayscale, + beg_image, nav_cam_pos, found_time)) return false; camera::CameraModel beg_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), @@ -225,8 +229,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const // the beg image next time around. 
int end_nav_cam_pos = nav_cam_pos; cv::Mat end_image; - if (!dense_map::lookupImage(end_nav_cam_time, nav_cam_msgs, save_grayscale, end_image, end_nav_cam_pos, - found_time)) + if (!dense_map::lookupImage(end_nav_cam_time, nav_cam_msgs, save_grayscale, end_image, + end_nav_cam_pos, found_time)) return false; camera::CameraModel end_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); @@ -348,6 +352,8 @@ double median_mesh_edge(cv::Mat const& depthMat) { void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { double median_len = median_mesh_edge(depthMat); + if (median_len <= 0) return; // should not happen + // Add points if lengths are more than this double min_len = 1.3 * median_len; @@ -446,7 +452,8 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { if (count >= num_points) break; } - if (count != num_points) LOG(FATAL) << "Bookkeeping error. Did not add all the points to the depth map."; + if (count != num_points) + LOG(FATAL) << "Bookkeeping error. Did not add all the points to the depth map."; } // Form a mesh. Ignore (x, y, z, i, w) values with (x, y, z) = (0, 0, 0). @@ -711,8 +718,8 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Z } // Smooth newly added points and also their immediate neighbors -void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, - int radius) { +void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, + double sigma, Vec5d const& Zero, int radius) { // Copy depthMat to workMat, with the latter not being changed below. // Note how we use origMat to keep track of points which did not // change recently. 
@@ -813,7 +820,8 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co if (static_cast(haz_cam_intensity.cols) != static_cast(pc_msg->width) || static_cast(haz_cam_intensity.rows) != static_cast(pc_msg->height)) LOG(FATAL) << "Depth image dimensions does not agree with point cloud dimensions."; - if (dense_map::matType(haz_cam_intensity) != "8UC1") LOG(FATAL) << "Extracted depth image should be of type: 8UC1."; + if (dense_map::matType(haz_cam_intensity) != "8UC3") + LOG(FATAL) << "Extracted depth image should be of type 8UC3."; // Extract the depth point cloud from the message pcl::PointCloud pc; @@ -839,7 +847,10 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co double x = pc.points[count].x; double y = pc.points[count].y; double z = pc.points[count].z; - double i = haz_cam_intensity.at(row, col); + + // Pick first channel. The precise color of the depth cloud is not important. + // It will be wiped later during smoothing and hole-filling anyway. + double i = haz_cam_intensity.at(row, col)[0]; bool skip = (col < depth_exclude_columns || depthMat.cols - col < depth_exclude_columns || row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); @@ -935,13 +946,14 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pcl::io::savePCDFileASCII(filename_buffer, pci); if (save_debug_data) { - // Find the image of distances in depthMat, for debugging. Note + // Find the image of distances in depthMat, for debugging. Note // that we ignore the extra points we added as new rows depthMat in // add_extra_pts(). // The depthMat may have extra rows, but we assume the number of columns // did not change. 
- if (haz_cam_intensity.cols != depthMat.cols) LOG(FATAL) << "Incorrect number of columns in depthMat\n"; + if (haz_cam_intensity.cols != depthMat.cols) + LOG(FATAL) << "Incorrect number of columns in depthMat\n"; cv::Mat depthDist(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_32FC1, cv::Scalar(0)); count = -1; @@ -1007,13 +1019,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co } } -#if 0 - // This is an instructive code to add to saveCameraPoses() // to compare the sci cam pose as found via localizing // the nav cam images vs localizing the sci cam image directly. // The conclusion is that the sci cam calibration needs // improvement, as the results are all over the place. +#if 0 void localize_sci_cam_directly() { std::cout.precision(8); while (cam_type == "sci_cam" && !use_brisk_map) { @@ -1119,584 +1130,612 @@ void localize_sci_cam_directly() { } #endif - // Given a desired camera and a set of acquisition timestamps for it, - // find the nav cam images nearest in time bracketing each timestamp, - // find the poses of those images using localization, interpolate the - // nav cam pose at the desired timestamp, and apply the transform - // between the nav cam and the desired camera to find the pose of the - // desired camera (by which it is meant the transform from the desired - // camera to the world). - - // Also write the extracted point clouds and the desired cam (nav or sci) images. 
- - void saveCameraPoses(bool simulated_data, std::string const& cam_type, - double desired_cam_to_nav_cam_offset, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, - Eigen::Affine3d& hazcam_depth_to_image_transform, - int depth_exclude_columns, int depth_exclude_rows, - double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, - std::vector const& median_filter_params, - bool save_debug_data, - std::vector const& nav_cam_msgs, - std::vector const& sci_cam_msgs, - std::vector const& haz_cam_points_msgs, - std::vector const& haz_cam_intensity_msgs, - std::vector const& nav_cam_bag_timestamps, - std::map> const& sci_cam_exif, - std::string const& output_dir, std::string const& desired_cam_dir, - double start, double duration, - double sampling_spacing_seconds, - double dist_between_processed_cams, std::set const& sci_cam_timestamps, - double max_iso_times_exposure, - boost::shared_ptr sparse_map, bool use_brisk_map, - dense_map::StampedPoseStorage const& sim_desired_cam_poses, - std::string const& external_mesh) { - double min_map_timestamp = std::numeric_limits::max(); - double max_map_timestamp = -min_map_timestamp; - std::vector sparse_map_timestamps; - if (!simulated_data && !use_brisk_map) { - // Find the minimum and maximum timestamps in the sparse map - const std::vector& images = sparse_map->cid_to_filename_; - sparse_map_timestamps.resize(images.size()); - for (size_t cid = 0; cid < images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(images[cid]); - sparse_map_timestamps[cid] = timestamp; - min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); - max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]); - } +// Given a desired camera and a set of acquisition timestamps for it, +// find the nav cam images nearest in time bracketing each timestamp, +// find the poses of those images using localization, interpolate the +// nav cam pose at the 
desired timestamp, and apply the transform +// between the nav cam and the desired camera to find the pose of the +// desired camera (by which it is meant the transform from the desired +// camera to the world). + +// Also write the extracted point clouds and the desired cam (nav or sci) images. + +void saveCameraPoses(bool simulated_data, std::string const& cam_type, + double desired_cam_to_nav_cam_offset, + Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, + Eigen::Affine3d& hazcam_depth_to_image_transform, + int depth_exclude_columns, int depth_exclude_rows, + double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, + std::vector const& median_filter_params, + bool save_debug_data, + std::vector const& nav_cam_msgs, + std::vector const& sci_cam_msgs, + std::vector const& haz_cam_points_msgs, + std::vector const& haz_cam_intensity_msgs, + std::vector const& nav_cam_bag_timestamps, + std::map> const& sci_cam_exif, + std::string const& output_dir, std::string const& desired_cam_dir, + double start, double duration, + double sampling_spacing_seconds, + double dist_between_processed_cams, std::set const& sci_cam_timestamps, + double max_iso_times_exposure, + boost::shared_ptr sparse_map, bool use_brisk_map, + dense_map::StampedPoseStorage const& sim_desired_cam_poses, + std::string const& external_mesh) { + double min_map_timestamp = std::numeric_limits::max(); + double max_map_timestamp = -min_map_timestamp; + std::vector sparse_map_timestamps; + if (!simulated_data && !use_brisk_map) { + // Find the minimum and maximum timestamps in the sparse map + const std::vector& images = sparse_map->cid_to_filename_; + sparse_map_timestamps.resize(images.size()); + for (size_t cid = 0; cid < images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(images[cid]); + sparse_map_timestamps[cid] = timestamp; + min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); + max_map_timestamp = 
std::max(max_map_timestamp, sparse_map_timestamps[cid]); } + } - // Open a handle for writing the index of files being written to disk - std::string index_file = output_dir + "/" + cam_type + "_index.txt"; - std::ofstream ofs(index_file.c_str()); - - // Keep track of where in the bags we are. - int desired_cam_pos = 0; // can be either nav, sci, or haz cam intensity position - int nav_cam_pos = 0; // track separately the nav cam position as well - - // These two are camera ids in the map used for different - // purposes. We increment them as we travel forward in time. - int sparse_map_id = 0, nav_cam_bag_id = 0; - - // These are used to track time as the bag is traversed - double beg_time = -1.0, prev_time = -1.0; - - // This is used to ensure cameras are not too close - Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); - - // Decide on which messages to process - std::vector const* desired_cam_msgs = NULL; - if (cam_type == "haz_cam") { - desired_cam_msgs = &haz_cam_points_msgs; // points topic - } else if (cam_type == "sci_cam") { - desired_cam_msgs = &sci_cam_msgs; // image topic - } else if (cam_type == "nav_cam") { - desired_cam_msgs = &nav_cam_msgs; // image topic - } else { - LOG(FATAL) << "Unknown camera type: " << cam_type; - } + // Open a handle for writing the index of files being written to disk + std::string index_file = output_dir + "/" + cam_type + "_index.txt"; + std::ofstream ofs(index_file.c_str()); + + // For the haz cam, need an image index, which will be haz_cam_index.txt, + // and a point cloud index, which will be depth_cam_index.txt. + std::string depth_index_file = output_dir + "/" + "depth_cam" + "_index.txt"; + std::ofstream depth_ofs; + if (cam_type == "haz_cam") depth_ofs = std::ofstream(depth_index_file.c_str()); + + // Keep track of where in the bags we are. 
+ int desired_cam_pos = 0; // can be either nav, sci, or haz cam intensity position + int nav_cam_pos = 0; // track separately the nav cam position as well + + // These two are camera ids in the map used for different + // purposes. We increment them as we travel forward in time. + int sparse_map_id = 0, nav_cam_bag_id = 0; + + // These are used to track time as the bag is traversed + double beg_time = -1.0, prev_time = -1.0; + + // This is used to ensure cameras are not too close + Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); + + // Decide on which messages to process + std::vector const* desired_cam_msgs = NULL; + if (cam_type == "haz_cam") { + desired_cam_msgs = &haz_cam_points_msgs; // points topic + } else if (cam_type == "sci_cam") { + desired_cam_msgs = &sci_cam_msgs; // image topic + } else if (cam_type == "nav_cam") { + desired_cam_msgs = &nav_cam_msgs; // image topic + } else { + LOG(FATAL) << "Unknown camera type: " << cam_type; + } - if (external_mesh == "") { - // Compute the starting bag time as the timestamp of the first cloud - for (auto& m : haz_cam_points_msgs) { - if (beg_time < 0) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - pc_msg = m.instantiate(); - if (pc_msg) { - beg_time = pc_msg->header.stamp.toSec(); - break; - } + if (external_mesh == "") { + // Compute the starting bag time as the timestamp of the first cloud + for (auto& m : haz_cam_points_msgs) { + if (beg_time < 0) { + sensor_msgs::PointCloud2::ConstPtr pc_msg; + pc_msg = m.instantiate(); + if (pc_msg) { + beg_time = pc_msg->header.stamp.toSec(); + break; } } - } else { - // Use an external mesh, so ignore the depth clouds (may even be absent) - beg_time = min_map_timestamp; } + } else { + // Use an external mesh, so ignore the depth clouds (may even be absent) + beg_time = min_map_timestamp; + } - for (auto& m : *desired_cam_msgs) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - double curr_time = -1.0; + for (auto& m : *desired_cam_msgs) { + 
sensor_msgs::PointCloud2::ConstPtr pc_msg; + double curr_time = -1.0; - if (cam_type == "haz_cam") { - // haz cam - pc_msg = m.instantiate(); - if (!pc_msg) continue; - curr_time = pc_msg->header.stamp.toSec(); + if (cam_type == "haz_cam") { + // haz cam + pc_msg = m.instantiate(); + if (!pc_msg) continue; + curr_time = pc_msg->header.stamp.toSec(); + } else { + // sci cam or nav cam + // Check for image + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + curr_time = image_msg->header.stamp.toSec(); } else { - // sci cam or nav cam - // Check for image - sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); - if (image_msg) { - curr_time = image_msg->header.stamp.toSec(); + // Check for compressed image + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); + if (comp_image_msg) { + curr_time = comp_image_msg->header.stamp.toSec(); } else { - // Check for compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg = m.instantiate(); - if (comp_image_msg) { - curr_time = comp_image_msg->header.stamp.toSec(); - } else { - // Not an image or compressed image topic - continue; - } + // Not an image or compressed image topic + continue; } } + } - bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); - if (!simulated_data && !use_brisk_map && !custom_timestamps) { - if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts - if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends - } - - if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time - - if (duration > 0 && curr_time - beg_time > start + duration) break; // past the desired ending time - - // The pose we will solve for - bool lookup_success = false; - Eigen::MatrixXd desired_cam_to_world_trans; + bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); + if (!simulated_data && 
!use_brisk_map && !custom_timestamps) { + if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts + if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends + } - // Let some time elapse between fusing clouds - if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_timestamps) { - continue; - } + if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time - // The case of custom timestamps - if (custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; - - if (!simulated_data) { - // Do localization based on nav cam, transform the pose to desired cam coordinates, etc - // Note that we convert from the desired time to the nav cam time for this operation - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, - desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, use_brisk_map, - sparse_map_timestamps, desired_cam_to_world_trans, sparse_map_id, nav_cam_bag_id, - nav_cam_pos)) { - std::cout.precision(17); - std::cout << "Localization failed at time " << curr_time << std::endl; - std::cout << "if too many such failures, consider modifying the value of " - << "--localization_options. See the documentation for more info." << std::endl; - continue; - } - } else { - // In this case the logic is much simpler. We already have the - // desired camera poses. Yet, the poses and images for a given - // camera need not be acquired at the same time, even in - // simulation, so, do interpolation. - Eigen::Affine3d out_pose; - double max_gap = 1.0e+10; // hoping the simulator is functioning well - if (!sim_desired_cam_poses.interpPose(curr_time, max_gap, out_pose)) continue; - desired_cam_to_world_trans = out_pose.matrix(); - } + if (duration > 0 && curr_time - beg_time > start + duration) + break; // past the desired ending time - // Success localizing. 
Update the time at which it took place for next time. - prev_time = curr_time; + // The pose we will solve for + bool lookup_success = false; + Eigen::MatrixXd desired_cam_to_world_trans; - std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + // Let some time elapse between fusing clouds + if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_timestamps) { + continue; + } - // See if to skip some images if the robot did not move much - Eigen::Affine3d curr_trans; - curr_trans.matrix() = desired_cam_to_world_trans; - Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); - double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); - if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && curr_dist_bw_cams < dist_between_processed_cams) { - std::cout << "Distance to previously processed camera is " << curr_dist_bw_cams << ", skipping it." - << std::endl; + // The case of custom timestamps + if (custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; + + if (!simulated_data) { + // Do localization based on nav cam, transform the pose to desired cam coordinates, etc + // Note that we convert from the desired time to the nav cam time for this operation + if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, + desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, + use_brisk_map, + sparse_map_timestamps, desired_cam_to_world_trans, + sparse_map_id, nav_cam_bag_id, + nav_cam_pos)) { + std::cout.precision(17); + std::cout << "Localization failed at time " << curr_time << std::endl; + std::cout << "if too many such failures, consider modifying the value of " + << "--localization_options. See the documentation for more info." << std::endl; continue; } + } else { + // In this case the logic is much simpler. We already have the + // desired camera poses. 
Yet, the poses and images for a given + // camera need not be acquired at the same time, even in + // simulation, so, do interpolation. + Eigen::Affine3d out_pose; + double max_gap = 1.0e+10; // hoping the simulator is functioning well + if (!sim_desired_cam_poses.interpPose(curr_time, max_gap, out_pose)) continue; + desired_cam_to_world_trans = out_pose.matrix(); + } - char filename_buffer[1000]; - std::string suffix = cam_type + "_to_world"; - - if (cam_type == "haz_cam") { - cv::Mat haz_cam_intensity; - bool save_grayscale = true; - - // Normally try to look up the image time right after the current cloud time - double desired_image_time = curr_time; - double found_time = -1.0; - if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, save_grayscale, haz_cam_intensity, - desired_cam_pos, found_time)) - continue; + // Success localizing. Update the time at which it took place for next time. + prev_time = curr_time; + + std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + + // See if to skip some images if the robot did not move much + Eigen::Affine3d curr_trans; + curr_trans.matrix() = desired_cam_to_world_trans; + Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); + double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); + if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && + curr_dist_bw_cams < dist_between_processed_cams) { + std::cout << "Distance to previously processed camera is " + << curr_dist_bw_cams << ", skipping it." 
+ << std::endl; + continue; + } - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), curr_time, - suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + char filename_buffer[1000]; + std::string suffix = cam_type + "_to_world"; - // Save the transform name in the index file - ofs << filename_buffer << "\n"; + if (cam_type == "haz_cam") { + cv::Mat haz_cam_intensity; + bool save_grayscale = false; // Get a color image, if possible - // Save the cloud - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", output_dir.c_str(), curr_time); + // Normally try to look up the image time right after the current cloud time + double desired_image_time = curr_time; + double found_time = -1.0; + if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, + save_grayscale, haz_cam_intensity, + desired_cam_pos, found_time)) + continue; - // Append the intensity to the point cloud and save it - save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, depth_exclude_columns, - depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, - reliability_weight_exponent, median_filter_params, save_debug_data, filename_buffer, - desired_cam_to_world_trans); + // Save the transform (only if the above lookup was successful) + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), + curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + // Save the transform name in the index file + depth_ofs << filename_buffer << "\n"; // for the depth + + // Append the intensity to the point cloud and save it + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", + output_dir.c_str(), curr_time); + save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, + depth_exclude_columns, + 
depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, + reliability_weight_exponent, median_filter_params, + save_debug_data, filename_buffer, + desired_cam_to_world_trans); + + // Save the name of the point cloud to the index + depth_ofs << filename_buffer << "\n"; + + // Save the image + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, haz_cam_intensity); - // Save the name of the point cloud to the index - ofs << filename_buffer << "\n"; + // Save the name of the image in the index + ofs << filename_buffer << "\n"; - } else { - // Save the nav or sci cam image at the given timestamp. - cv::Mat desired_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, desired_cam_image, desired_cam_pos, - found_time)) - continue; + } else { + // Save the nav or sci cam image at the given timestamp. + cv::Mat desired_cam_image; + bool save_grayscale = false; + double found_time = -1.0; + if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, + desired_cam_image, desired_cam_pos, found_time)) + continue; - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), curr_time, - suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); - - // This is very useful for debugging things with ASP. - - // A debug utility for saving a camera in format ASP understands - // if (cam_type == "sci_cam") - // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); - - // Apply an optional scale. Use a pointer to not copy the data more than - // one has to. 
- cv::Mat* img_ptr = &desired_cam_image; - cv::Mat scaled_image; - if (!simulated_data && cam_type == "sci_cam") { - auto exif_it = sci_cam_exif.find(curr_time); - if (exif_it != sci_cam_exif.end()) { - std::vector const& exif = exif_it->second; - double iso = exif[dense_map::ISO]; - double exposure = exif[dense_map::EXPOSURE_TIME]; - dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, *img_ptr, scaled_image); - img_ptr = &scaled_image; - } + // Save the transform (only if the above lookup was successful) + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", + output_dir.c_str(), curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + // This is very useful for debugging things with ASP. + + // A debug utility for saving a camera in format ASP understands + // if (cam_type == "sci_cam") + // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); + + // Apply an optional scale. Use a pointer to not copy the data more than + // one has to. 
+ cv::Mat* img_ptr = &desired_cam_image; + cv::Mat scaled_image; + if (!simulated_data && cam_type == "sci_cam") { + auto exif_it = sci_cam_exif.find(curr_time); + if (exif_it != sci_cam_exif.end()) { + std::vector const& exif = exif_it->second; + double iso = exif[dense_map::ISO]; + double exposure = exif[dense_map::EXPOSURE_TIME]; + dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, + *img_ptr, scaled_image); + img_ptr = &scaled_image; } - - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, *img_ptr); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; } - // Update the previous camera center - prev_cam_ctr = curr_cam_ctr; + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, *img_ptr); + + // Save the name of the image in the index + ofs << filename_buffer << "\n"; } - std::cout << "Wrote: " << index_file << std::endl; + // Update the previous camera center + prev_cam_ctr = curr_cam_ctr; } - void save_navcam_poses_and_images(boost::shared_ptr sparse_map, - std::vector const& nav_cam_msgs, - std::string const& output_dir, std::string const& nav_cam_dir) { - // Keep track of where in the bag we are - int nav_cam_pos = 0; + std::cout << "Wrote: " << index_file << std::endl; +} - std::string index_file = output_dir + "/nav_cam_index.txt"; +void save_navcam_poses_and_images(boost::shared_ptr sparse_map, + std::vector const& nav_cam_msgs, + std::string const& output_dir, std::string const& nav_cam_dir) { + // Keep track of where in the bag we are + int nav_cam_pos = 0; - std::ofstream ofs(index_file.c_str()); + std::string index_file = output_dir + "/nav_cam_index.txt"; - char filename_buffer[1000]; + std::ofstream 
ofs(index_file.c_str()); - for (unsigned int cid = 0; cid < sparse_map->cid_to_filename_.size(); cid++) { - std::string img_file = sparse_map->cid_to_filename_[cid]; + char filename_buffer[1000]; - double timestamp = dense_map::fileNameToTimestamp(img_file); + for (unsigned int cid = 0; cid < sparse_map->cid_to_filename_.size(); cid++) { + std::string img_file = sparse_map->cid_to_filename_[cid]; - std::string ext = ff_common::file_extension(img_file); - std::string cam_file = ff_common::ReplaceInStr(img_file, "." + ext, "_nav_cam_to_world.txt"); - cam_file = output_dir + "/" + ff_common::basename(cam_file); - if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; + double timestamp = dense_map::fileNameToTimestamp(img_file); - dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); + std::string ext = ff_common::file_extension(img_file); + std::string cam_file = ff_common::ReplaceInStr(img_file, "." + ext, "_nav_cam_to_world.txt"); + cam_file = output_dir + "/" + ff_common::basename(cam_file); + if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; - cv::Mat nav_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(timestamp, nav_cam_msgs, save_grayscale, nav_cam_image, nav_cam_pos, found_time)) - continue; + dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, nav_cam_image); + cv::Mat nav_cam_image; + bool save_grayscale = false; // read color images, if found + double found_time = -1.0; + if (!dense_map::lookupImage(timestamp, nav_cam_msgs, save_grayscale, nav_cam_image, + nav_cam_pos, found_time)) + continue; - ofs << filename_buffer << "\n"; - } - } + 
snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, nav_cam_image); - DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); - DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); - DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); - DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); - DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); - DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); - DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); - DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); - DEFINE_string(camera_type, "all", - "Specify which cameras to process. By default, process all. Options: " - "nav_cam, sci_cam."); - DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); - DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); - DEFINE_double(sampling_spacing_seconds, 0.5, - "Spacing to use, in seconds, between consecutive depth images in " - "the bag that are processed."); - DEFINE_double(dist_between_processed_cams, 0.1, - "Once an image or depth image is processed, how far the camera " - "should move (in meters) before it should process more data."); - DEFINE_string(sci_cam_timestamps, "", - "Process only these sci cam timestamps (rather than " - "any in the bag using --dist_between_processed_cams, etc.). 
Must be " - "a file with one timestamp per line."); - DEFINE_double(max_iso_times_exposure, 5.1, - "Apply the inverse gamma transform to " - "images, multiply them by max_iso_times_exposure/ISO/exposure_time " - "to adjust for lightning differences, then apply the gamma " - "transform back. This value should be set to the maximum observed " - "ISO * exposure_time. The default is 5.1. Not used with simulated data."); - DEFINE_int32(depth_exclude_columns, 0, - "Remove this many columns of data from the rectangular depth image sensor " - "at margins to avoid some distortion of that data."); - DEFINE_int32(depth_exclude_rows, 0, - "Remove this many rows of data from the rectangular depth image sensor " - "at margins to avoid some distortion of that data."); - DEFINE_double(foreshortening_delta, 5.0, - "A smaller value here will result in holes in depth images being filled more " - "aggressively but potentially with more artifacts in foreshortened regions."); - DEFINE_string(median_filters, "7 0.1 25 0.1", - "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " - "if it differs, in the Manhattan norm, from the median of cloud points " - "in the pixel window of size wi centered at it by more than di. This " - "removes points sticking out for each such i."); - DEFINE_double(depth_hole_fill_diameter, 30.0, - "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " - "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " - "later (--max_hole_diameter)."); - DEFINE_double(reliability_weight_exponent, 2.0, - "A larger value will give more weight to depth points corresponding to " - "pixels closer to depth image center, which are considered more reliable."); - DEFINE_bool(simulated_data, false, - "If specified, use data recorded in simulation. 
" - "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); - DEFINE_bool(use_brisk_map, false, - "If specified, instead of a SURF sparse map made from the same bag that needs " - "texturing, use a pre-existing and unrelated BRISK map. This map may be more " - "convenient but less reliable."); - DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); - DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " - "file with this value."); - DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging."); - - int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; - - if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; - if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; - - if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; - - if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) - LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map; - - // Parse the median filter params - double val; - std::vector median_filter_params; - std::istringstream ifs(FLAGS_median_filters); - while (ifs >> val) median_filter_params.push_back(val); - - if (median_filter_params.size() % 2 != 0) - LOG(FATAL) << "There must exist an even number of median filter parameters, " - << "being pairs of window sizes and distances."; - - // Need high precision to print timestamps - std::cout.precision(17); - - // Read simulated poses - dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; - if (FLAGS_simulated_data) { - std::string haz_cam_pose_topic = std::string("/") + 
TOPIC_HAZ_CAM_SIM_POSE; - std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; - std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; - std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; - dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); - dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); - } + ofs << filename_buffer << "\n"; + } +} - // Set up handles for reading data at given time stamp without - // searching through the whole bag each time. - dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); - dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); - dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); - - std::vector nav_cam_bag_timestamps; - if (!FLAGS_simulated_data) - dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); - - std::map> sci_cam_exif; - if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif); - - // How many columns to exclude from the left and the right ends of - // the haz camera depth cloud (this is needed because that cloud has - // some distortion). 
- std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " << FLAGS_depth_exclude_rows - << " rows at the margins of the depth point clouds.\n"; - - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - if (!FLAGS_simulated_data) { - // Read a bunch of transforms from the robot calibration file. - // The empirically determined offset between the measured timestamps - // of the cameras, in seconds. There are two reasons for this time offset: - // (1) The nav cam and sci cam are acquired on different processors. - // (2) The actual moment the camera image is recorded is not the same - // moment that image finished processing and the timestamp is set. - // This delay is worse for the sci cam which takes longer to acquire. - // TODO(oalexan1): camera_calibrator must actually solve for them. 
- dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { - double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; - } +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); +DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); +DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); +DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); +DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); +DEFINE_string(camera_type, "all", + "Specify which cameras to process. By default, process all. 
Options: " + "nav_cam, sci_cam."); +DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); +DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); +DEFINE_double(sampling_spacing_seconds, 0.5, + "Spacing to use, in seconds, between consecutive depth images in " + "the bag that are processed."); +DEFINE_double(dist_between_processed_cams, 0.1, + "Once an image or depth image is processed, how far the camera " + "should move (in meters) before it should process more data."); +DEFINE_string(sci_cam_timestamps, "", + "Process only these sci cam timestamps (rather than " + "any in the bag using --dist_between_processed_cams, etc.). Must be " + "a file with one timestamp per line."); +DEFINE_double(max_iso_times_exposure, 5.1, + "Apply the inverse gamma transform to " + "images, multiply them by max_iso_times_exposure/ISO/exposure_time " + "to adjust for lightning differences, then apply the gamma " + "transform back. This value should be set to the maximum observed " + "ISO * exposure_time. The default is 5.1. Not used with simulated data."); +DEFINE_int32(depth_exclude_columns, 0, + "Remove this many columns of data from the rectangular depth image sensor " + "at margins to avoid some distortion of that data."); +DEFINE_int32(depth_exclude_rows, 0, + "Remove this many rows of data from the rectangular depth image sensor " + "at margins to avoid some distortion of that data."); +DEFINE_double(foreshortening_delta, 5.0, + "A smaller value here will result in holes in depth images being filled more " + "aggressively but potentially with more artifacts in foreshortened regions."); +DEFINE_string(median_filters, "7 0.1 25 0.1", + "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " + "if it differs, in the Manhattan norm, from the median of cloud points " + "in the pixel window of size wi centered at it by more than di. 
This " + "removes points sticking out for each such i."); +DEFINE_double(depth_hole_fill_diameter, 30.0, + "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " + "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " + "later (--max_hole_diameter)."); +DEFINE_double(reliability_weight_exponent, 2.0, + "A larger value will give more weight to depth points corresponding to " + "pixels closer to depth image center, which are considered more reliable."); +DEFINE_bool(simulated_data, false, + "If specified, use data recorded in simulation. " + "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); +DEFINE_bool(use_brisk_map, false, + "If specified, instead of a SURF sparse map made from the same bag that needs " + "texturing, use a pre-existing and unrelated BRISK map. This map may be more " + "convenient but less reliable."); +DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); +DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", + "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "file with this value."); +DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging."); + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + + if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; + if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; + + if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; + + if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) + LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map; 
+ + // Parse the median filter params + double val; + std::vector median_filter_params; + std::istringstream ifs(FLAGS_median_filters); + while (ifs >> val) median_filter_params.push_back(val); + + if (median_filter_params.size() % 2 != 0) + LOG(FATAL) << "There must exist an even number of median filter parameters, " + << "being pairs of window sizes and distances."; + + // Need high precision to print timestamps + std::cout.precision(17); + + // Read simulated poses + dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; + if (FLAGS_simulated_data) { + std::string haz_cam_pose_topic = std::string("/") + TOPIC_HAZ_CAM_SIM_POSE; + std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; + std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; + std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; + dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); + dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); + } - scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + // Set up handles for reading data at given time stamp without + // searching through the whole bag each time. 
+ dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); + dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); + dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); + dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); + dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); + + std::vector nav_cam_bag_timestamps; + if (!FLAGS_simulated_data) + dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); + + std::map> sci_cam_exif; + if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif); + + // How many columns to exclude from the left and the right ends of + // the haz camera depth cloud (this is needed because that cloud has + // some distortion). + std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " + << FLAGS_depth_exclude_rows << " rows at the margins of the depth point clouds.\n"; + + double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; + Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::Affine3d hazcam_depth_to_image_transform; + hazcam_depth_to_image_transform.setIdentity(); // default value + camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + if (!FLAGS_simulated_data) { 
+ // Read a bunch of transforms from the robot calibration file. + // The empirically determined offset between the measured timestamps + // of the cameras, in seconds. There are two reasons for this time offset: + // (1) The nav cam and sci cam are acquired on different processors. + // (2) The actual moment the camera image is recorded is not the same + // moment that image finished processing and the timestamp is set. + // This delay is worse for the sci cam which takes longer to acquire. + // TODO(oalexan1): camera_calibrator must actually solve for them. + dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", + "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", + "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, + scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, + navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, + sci_cam_params); + + if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { + double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); + std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset + << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; + scicam_to_hazcam_timestamp_offset = new_val; } - boost::shared_ptr sparse_map; - if (!FLAGS_simulated_data) { - std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; - sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - if (!FLAGS_use_brisk_map && - (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { - LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; - } - if (FLAGS_use_brisk_map && - (sparse_map->GetDetectorName() != "ORGBRISK" || sparse_map->vocab_db_.binary_db == NULL)) { - LOG(FATAL) << "An ORGBRISK map with a vocab db is expected 
with --use_brisk_map."; - } - } + scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; + std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; + std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; + std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + } - // A very important sanity check. Not how we use gflags programatically - // to access the value of --histogram_equalization as a string encoding a bool - // ("true" or "false"). Here we ensure that the same flag for histogram - // equalization is used in the map and for localization. - std::string histogram_equalization; - if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { - if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || - (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) - LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " - << "to use for localization are incompatible."; + boost::shared_ptr sparse_map; + if (!FLAGS_simulated_data) { + std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; + sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + if (!FLAGS_use_brisk_map && + (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { + LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; } - - // If desired to process only specific timestamps - std::set sci_cam_timestamps; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - 
double val; - while (ifs >> val) sci_cam_timestamps.insert(val); + if (FLAGS_use_brisk_map && + (sparse_map->GetDetectorName() != "ORGBRISK" || sparse_map->vocab_db_.binary_db == NULL)) { + LOG(FATAL) << "An ORGBRISK map with a vocab db is expected with --use_brisk_map."; } + } - dense_map::createDir(FLAGS_output_dir); + // A very important sanity check. Not how we use gflags programatically + // to access the value of --histogram_equalization as a string encoding a bool + // ("true" or "false"). Here we ensure that the same flag for histogram + // equalization is used in the map and for localization. + std::string histogram_equalization; + if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { + if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || + (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) + LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " + << "to use for localization are incompatible."; + } - // Store here the sci cam and nav cam images whose poses we save - std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; - std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; - dense_map::createDir(sci_cam_dir); - dense_map::createDir(nav_cam_dir); + // If desired to process only specific timestamps + std::set sci_cam_timestamps; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps.insert(val); + } - // Save haz cam poses and haz cam clouds - if (FLAGS_external_mesh == "") { - std::string cam_type = "haz_cam"; - // haz to nav offset - double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - 
FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, "", // not needed - FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, - sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, - sim_haz_cam_poses, FLAGS_external_mesh); - } + dense_map::createDir(FLAGS_output_dir); + + // Store here the sci cam and nav cam images whose poses we save + std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; + std::string haz_cam_dir = FLAGS_output_dir + "/distorted_haz_cam"; + std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; + dense_map::createDir(nav_cam_dir); + dense_map::createDir(haz_cam_dir); + dense_map::createDir(sci_cam_dir); + + // Save haz cam poses and haz cam clouds + if (FLAGS_external_mesh == "") { + std::string cam_type = "haz_cam"; + // haz to nav offset + double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, + hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, + FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, + haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, + sci_cam_exif, FLAGS_output_dir, haz_cam_dir, + FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, + sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, + sim_haz_cam_poses, FLAGS_external_mesh); + } - // Save sci cam 
poses and sci cam images - if (FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { - std::string cam_type = "sci_cam"; - // sci to nav offset - double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, + // Save sci cam poses and sci cam images + if (FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { + std::string cam_type = "sci_cam"; + // sci to nav offset + double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, + hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, + FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, + haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, + sci_cam_exif, FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, + FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, + FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, + FLAGS_external_mesh); + + // With simulated data we won't perform an additional undistortion + // step which would save the undistorted intrinsics. Hence need to + // do that here. + if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); + } + // Save nav cam poses and nav cam images. This does not work for simulated data. + if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { + if (FLAGS_use_brisk_map) { + // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. 
+ std::string cam_type = "nav_cam"; + double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself + dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, FLAGS_reliability_weight_exponent, median_filter_params, FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, + sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, + FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, FLAGS_external_mesh); - - // With simulated data we won't perform an additional undistortion - // step which would save the undistorted intrinsics. Hence need to - // do that here. - if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); - } - // Save nav cam poses and nav cam images. This does not work for simulated data. - if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { - if (FLAGS_use_brisk_map) { - // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. 
- std::string cam_type = "nav_cam"; - double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself - dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, - FLAGS_external_mesh); - } else { - // Take a shortcut, since the sparse map is made of precisely of the images we - // want to texture, just save those images and their poses from the map, avoiding - // localization - save_navcam_poses_and_images(sparse_map, nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); - } + } else { + // Take a shortcut, since the sparse map is made of precisely of the images we + // want to texture, just save those images and their poses from the map, avoiding + // localization + save_navcam_poses_and_images(sparse_map, nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); } - - return 0; } + + return 0; +} diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index d932d0d2..018ed92c 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -94,7 +94,7 @@ def process_args(args): dest="camera_type", default="all", help="Specify which cameras to process. By default, process all. 
" - + "Options: nav_cam, sci_cam.", + + "Options: nav_cam, haz_cam, sci_cam.", ) parser.add_argument( "--start", @@ -352,6 +352,10 @@ def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): ) sys.exit(1) + if args.camera_type == "haz_cam" and args.simulated_data: + print("Texturing haz cam with simulated data was not tested.") + sys.exit(1) + if args.output_dir == "": raise Exception("The path to the output directory was not specified.") @@ -477,11 +481,11 @@ def fuse_clouds(batch_tsdf_path, mesh, args): Invoke the voxblox batch_tsdf tool to fuse the depth images. """ - haz_cam_index = os.path.join(args.output_dir, "haz_cam_index.txt") + depth_cam_index = os.path.join(args.output_dir, "depth_cam_index.txt") cmd = [ batch_tsdf_path, - haz_cam_index, + depth_cam_index, mesh, args.max_ray_length, args.voxel_size, @@ -723,6 +727,10 @@ def texture_mesh(src_path, cam_type, mesh, args): if cam_type == "nav_cam": scale = 1.0 undist_crop_win = "1100 776" + elif cam_type == "haz_cam": + scale = 1.0 + undist_crop_win = "210 160" + elif cam_type == "sci_cam": # Deal with the fact that the robot config file may assume # a different resolution than what is in the bag @@ -889,6 +897,7 @@ def merge_poses_and_clouds(args): simplified_mesh = args.external_mesh nav_textured_mesh = "" + haz_textured_mesh = "" sci_textured_mesh = "" if start_step <= 9: # For simulated data texturing nav cam images is not supported @@ -897,6 +906,12 @@ def merge_poses_and_clouds(args): ): nav_textured_mesh = texture_mesh(src_path, "nav_cam", simplified_mesh, args) + # For simulated data texturing haz cam images is not supported + if (not args.simulated_data) and ( + args.camera_type == "all" or args.camera_type == "haz_cam" + ): + haz_textured_mesh = texture_mesh(src_path, "haz_cam", simplified_mesh, args) + if args.camera_type == "all" or args.camera_type == "sci_cam": sci_textured_mesh = texture_mesh(src_path, "sci_cam", simplified_mesh, args) @@ -915,5 +930,8 @@ def 
merge_poses_and_clouds(args): if nav_textured_mesh != "": print("Nav cam textured mesh: " + nav_textured_mesh) + if haz_textured_mesh != "": + print("Haz cam textured mesh: " + haz_textured_mesh) + if sci_textured_mesh != "": print("Sci cam textured mesh: " + sci_textured_mesh) diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index d430c79b..d8e14135 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ b/dense_map/geometry_mapper/tools/image_picker.cc @@ -265,9 +265,9 @@ int main(int argc, char** argv) { // Write the images to disk bool save_grayscale = true; // For feature matching need grayscale - double found_time = -1.0; // won't be used, but expected by the api - int bag_pos = 0; // reset this each time for (size_t nav_it = 0; nav_it < nav_cam_timestamps.size(); nav_it++) { + double found_time = -1.0; // won't be used, but expected by the api + int bag_pos = 0; // reset this each time cv::Mat image; if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, save_grayscale, image, bag_pos, From 89bbeb95638a1de4ed93cca9f5e88467ae0f385a Mon Sep 17 00:00:00 2001 From: Brian Coltin Date: Thu, 16 Dec 2021 10:49:43 -0800 Subject: [PATCH 12/40] Readme update (#7) * Update description in readme. 
* adding github pages links * reviewing TOC - had to change markdown usage in some places; adding some description * adding astrobee repo link * adding badges Co-authored-by: Marina Moreira --- DEMO_INSTALL.md | 15 +++-- INSTALL.md | 27 ++++++--- README.md | 61 +++++++++++++++------ astrobee/behaviors/cargo/readme.md | 2 +- astrobee/behaviors/inspection/readme.md | 45 +++++---------- astrobee/behaviors/readme.md | 3 - astrobee/hardware/readme.md | 8 +-- astrobee/hardware/wifi/readme.md | 8 +-- astrobee/readme.md | 10 ++-- astrobee/simulation/acoustics_cam/readme.md | 12 ++-- astrobee/simulation/isaac_gazebo/readme.md | 4 +- dense_map/readme.md | 2 - dense_map/volumetric_mapper/readme.md | 12 ++-- img_analysis/readme.md | 18 ++++-- isaac/Subsystems.md | 2 +- 15 files changed, 127 insertions(+), 102 deletions(-) diff --git a/DEMO_INSTALL.md b/DEMO_INSTALL.md index 3c17eb62..5c4e0439 100644 --- a/DEMO_INSTALL.md +++ b/DEMO_INSTALL.md @@ -1,8 +1,10 @@ -# Demo docker install +Demo docker install +===== Instructions for the full demo install -### Check out +Check out +--------- Run: @@ -15,7 +17,8 @@ Run: (You can also modify the checkout location `~/ws` if you want.) -### Install dependencies +Install dependencies +--------- Install docker tools: @@ -23,11 +26,13 @@ Install docker tools: Install nvidia-docker by following (the directions here)[https://github.com/NVIDIA/nvidia-docker]. -### Build +Build +--------- Run `scripts/docker/build.sh` to build the docker images for the demo. -### Install +Install +--------- Run `scripts/docker/run.sh` to run the demo. Open `http://127.0.0.1:8080` in a web browser to see what is happening. Use `docker ps` to see the docker containers and use `docker exec -it container_name /bin/bash` to get a shell in one. 
diff --git a/INSTALL.md b/INSTALL.md index 5157e7d9..d51722b7 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,6 +1,8 @@ -# Native Install +Native Install +===== -### Usage instructions for non-NASA and NASA users +Usage instructions for non-NASA and NASA users +--------- Install the 64-bit version of [Ubuntu 16.04, 18.04 or 20.04](http://releases.ubuntu.com/) on a host machine, and make sure that you can checkout and build code. @@ -16,11 +18,13 @@ any other operating system or Ubuntu versions.* *Note: Please ensure you install the 64-bit version of Ubuntu. We do not support running ISAAC Software on 32-bit systems.* -### Machine setup +Machine setup +--------- The `isaac` repo depends on some `astrobee` packages, therefore, `astrobee` needs to be installed beforehand. -### Checkout the project source code +Checkout the project source code +--------- At this point you need to decide where you'd like to put the ISAAC workspace and code (`ISAAC_WS`) on your machine (add this to your .bashrc for persistency): @@ -37,7 +41,8 @@ Checkout the submodule: git submodule update --init --recursive -### Dependencies +Dependencies +--------- Next, install all required dependencies: *Note: `root` access is necessary to install the packages below* @@ -52,13 +57,15 @@ Next, install all required dependencies: rosdep update popd -### Configuring the build +Configuring the build +--------- By default, the catkin uses the following paths: - devel build path: `$ISAAC_WS/devel` - install build path: `$ISAAC_WS/install` -### Building the code +Building the code +--------- Source your astrobee build environment, for example as: @@ -80,7 +87,8 @@ The command 'source devel/setup.bash' is very important to make sure that everyt If you are working in simulation only, then you're all done! The next steps are only for running ISAAC onboard Astrobee. 
-### Cross-compiling isaac +Cross-compiling isaac +--------- To cross-compile ISAAC, one must first cross compile the astobee code using the NASA_INSTALL instructions. @@ -99,7 +107,8 @@ A new catkin profile should be made to retain the configurations and easily swit --cmake-args -DCMAKE_TOOLCHAIN_FILE=$ISAAC_WS/src/scripts/build/isaac_cross.cmake \ -DARMHF_CHROOT_DIR=$ARMHF_CHROOT_DIR -### Build ISAAC debian +Build ISAAC debian +--------- To build a debian you must first confirm that cross-compiling is functional. Once it is: diff --git a/README.md b/README.md index efcde8f2..3383cae3 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,53 @@ -# ISAAC Software - -### About - -The Integrated System for Autonomous and Adaptive Caretaking project is developing technology for combining robots inside a spacecraft with vehicle infrastructure subsystems, to form an integrated autonomous system. -The system validates its technology through demonstrations using the Astrobee free-flier robots, existing robots onboard the International Space Station as analogs for future robots that will be developed for the Gateway. - -The ISAAC project actively develops in a variety of repos hosted at -Ames and JSC. +ISAAC (Integrated System for Autonomous and Adaptive Caretaking) +===== + +[![GitHub license](https://img.shields.io/github/license/nasa/isaac)](https://github.com/nasa/isaac/blob/master/LICENSE) +[![Build, test and push packages CI](https://github.com/nasa/isaac/actions/workflows/ci_push.yml/badge.svg)](https://github.com/nasa/isaac/actions/workflows/ci_push.yml) + +The ISAAC project has three main technical thrusts: + +1. **Integrated data**: The current state of the art is that data from sensors associated with different facility + subsystems (Structures, GN&C, and ECLSS, etc.) remain siloed. 
ISAAC technology unifies facility data and models with + autonomous robotics, linking data streams from facility subsystems, sensor networks, and robots, as well as linking 3D + geometry and sensor data map layers, and detecting changes and anomalies. +2. **Integrated control interface**: Current state of the art manages facilities with largely independent interface tools + for different subsystems. Interfaces have different heritage, design assumptions and operator interface styles. Subsystem + interactions are hard to analyze due to poor connectivity between separate tools. ISAAC technology combines interface + tools for facility subsystems and autonomous robots; improves system-level situation awareness, situation understanding, + and operator productivity; enables linking and embedding between tools to improve subsystem interaction analysis; and + supports the entire activity life cycle from planning through execution and analysis. +3. **Coordinated execution**: The current state of the art for executing facility activities that require coordination + between subsystems is either manual commanding (operator tracks dependencies between subsystems) or simple sequencing + that can be brittle to even minor contingencies during execution. ISAAC technology models dependencies, uses automated + planning to translate a high-level task definition to a plan that can work given the current system state (e.g. include + actions to open hatches so that a robot can move where needed), and leverages ISAAC’s integrated data technology to + watch for execution contingencies and trigger replanning as needed. This technology will reduce the time to effect + changes on the system during critical faults and emergencies. This `isaac` repo serves as a master for integrating an end-to-end demo that draws on code from the other repos as well as directly including a significant amount of the ISAAC code, mostly relating to -the Astrobee robot. 
+the [Astrobee robot](https://github.com/nasa/astrobee). This repository includes: + +- [Astrobee software](https://nasa.github.io/isaac/html/astrobee.html) for added behaviors such as inpection and cargo transport as well as new sensor utilization. +- [Dense mapping](https://nasa.github.io/isaac/html/geometric_streaming_mapper.html) to create a textured 3D map +- [Volumetric mapping](https://nasa.github.io/isaac/html/volumetric_mapper.html) to map volumetric signals, such as WiFi. +- [Image analysis](https://nasa.github.io/isaac/html/ano.html) module to train a neural network to detect anomalies +You may also be interested in the separate repository for the [ISAAC User Interface](https://github.com/nasa/isaac_user_interface), +which enables monitoring of multiple robots through a web browser. -### System requirements +System requirements +--------- The `isaac` repo depends on the `astrobee` repo, therefore it inherits the same system requirements. You must use Ubuntu 16.04 to 20.04 64-bit. When running in simulation, certain Gazebo plugins require appropriate -graphics drivers. +graphics drivers. See INSTALL.md in that repository for more +information. -### Usage +Usage +--------- There are two main ways to install and run `isaac`: @@ -39,7 +65,8 @@ There are two main ways to install and run `isaac`: [Instructions on installing and using the ISAAC Software](https://nasa.github.io/isaac/html/md_INSTALL.html). For running the [docker demos](https://nasa.github.io/isaac/html/md_DEMO_INSTALL.html) -### Documentation +Documentation +--------- [The documentation is auto-generated from the contents of this repository.](https://nasa.github.io/isaac/documentation.html) @@ -47,14 +74,16 @@ To compile the documentation locally (make sure you have the latest doxygen inst doxygen isaac.doxyfile -### Contributing +Contributing +--------- The ISAAC Software is open source, and we welcome contributions from the public. Please submit pull requests to the `develop` branch. 
For us to merge any pull requests, we must request that contributors sign and submit either an [Individual Contributor License Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Individual%20CLA.pdf) or a [Corporate Contributor License Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Corporate%20CLA.pdf) due to NASA legal requirements. Thank you for your understanding. -### License +License +--------- Copyright (c) 2021, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. diff --git a/astrobee/behaviors/cargo/readme.md b/astrobee/behaviors/cargo/readme.md index 93016ffa..4bae028f 100644 --- a/astrobee/behaviors/cargo/readme.md +++ b/astrobee/behaviors/cargo/readme.md @@ -2,7 +2,7 @@ This directory provides the cargo_tool -## Using the cargo tool +### Using the cargo tool This tool is used to initiate pickup and drop cargo actions. diff --git a/astrobee/behaviors/inspection/readme.md b/astrobee/behaviors/inspection/readme.md index 14d4902e..e280c7d6 100644 --- a/astrobee/behaviors/inspection/readme.md +++ b/astrobee/behaviors/inspection/readme.md @@ -1,8 +1,9 @@ -\page inspection Inspeiction Behavior +\page inspection Inspection Behavior This directory provides two tools: inspection_tool and sci_cam_tool. -## Using the inspection tool +Using the inspection tool +--------- This tool is used to initiate inspection actions. To run the tool: @@ -10,45 +11,27 @@ This tool is used to initiate inspection actions. To run the tool: As of now, the actions available are: -### Pause +*pause*: Pauses the current action being performed, the goal is kept, so a resume command will resume the action. -Pauses the current action being performed, the goal is kept, so a resume command will resume the action. +*resume*: Resumes a paused or failed action (in the case where there is a motion fail or picture fail). 
It will restart on the "move to inspection pose" step. -### Resume +*repeat*: Resumes the survey repeating the last inspection pose. -Resumes a paused or failed action (in the case where there is a motion fail or picture fail). It will restart on the "move to inspection pose" step. +*skip*: Skips the surrent inspection pose, useful if a certain pose is unreachable. -### Repeat -Resumes the survey repeating the last inspection pose. +*save*: Saves the current goal, in the case flight software needs to be restarted. The current survey is saved in resources/current.txt. It can be loaded afterwards using +*anomaly*: Starts an anomaly inspection action. The robot will come up to a target, take a picture and run the picture through the image anomaly detector to classify it. -### Skip +*geometry*: Starts a geometry inspection, meaning that it will go to the commanded poses and take pictures to be processed by the geometry mapper. -Skips the surrent inspection pose, useful if a certain pose is unreachable. +*panorama*: it will do a panorama survey of all points specified +*volumetric*: This will perform a volumetric survey -### Save - -Saves the current goal, in the case flight software needs to be restarted. The current survey is saved in resources/current.txt. It can be loaded afterwards using - -### Anomaly - -Starts an anomaly inspection action. The robot will come up to a target, take a picture and run the picture through the image anomaly detector to classify it. - -### Geometry - -Starts a geometry inspection, meaning that it will go to the commanded poses and take pictures to be processed by the geometry mapper. - -### Panorama - -it will do a panorama survey of all points specified - -### Volumetric - -This will perform a volumetric survey - -## Using sci_cam_tool +Using sci_cam_tool +--------- This tool is used to control the sci cam plugin in the Astrobee simulator, more precisely the way it acquires pictures. 
To use it, perform the following steps: diff --git a/astrobee/behaviors/readme.md b/astrobee/behaviors/readme.md index cf40b0be..8de60c00 100644 --- a/astrobee/behaviors/readme.md +++ b/astrobee/behaviors/readme.md @@ -1,8 +1,5 @@ \page beh Behaviors - -## Overview - Behaviors are high-level actions that support complex maneuvers, such as control of the arm, docking, and perching. They are built upon the more more generic actions offered by other subsystems, such as the mobility subsystem's "motion" action, or the localization manager's "switch" action. \subpage inspection diff --git a/astrobee/hardware/readme.md b/astrobee/hardware/readme.md index 4680da30..ee311ba6 100644 --- a/astrobee/hardware/readme.md +++ b/astrobee/hardware/readme.md @@ -1,9 +1,3 @@ \page hw Hardware - -## Overview - -Hardware drivers - -\subpage wifi_driver -\subpage acoustics_camera \ No newline at end of file +\subpage wifi_driver : Hardware utilization nodes \ No newline at end of file diff --git a/astrobee/hardware/wifi/readme.md b/astrobee/hardware/wifi/readme.md index 7abb1c61..e03146dd 100644 --- a/astrobee/hardware/wifi/readme.md +++ b/astrobee/hardware/wifi/readme.md @@ -1,7 +1,5 @@ \page wifi_driver Wifi Driver -# Overview - This package provides the driver for publishing wifi strength info. I is based on the library available in https://github.com/bmegli/wifi-scan, adapted to ROS for this project. Because the function that scans all the networks needs network admin rights, there are 2 modes in which this driver can run. The modes can be set as: @@ -23,14 +21,14 @@ General Parameters: interface_name: to find ou the interface name, one can find out the interface name by running ifconfig in the cmd line. -# Scan Station +### Scan Station Publishes information to the topic hw/wifi/station. It only includes signal strength of the wifi network to which the robot is currently connected to. 
Scan Station Parameters: time_scan_station: update rate of the station scan, can be set upt to 50ms. -# Scan All +### Scan All Publishes information to the topic hw/wifi/all. Scan All Parameters: @@ -42,7 +40,7 @@ Scan All Parameters: All data is published as messages on ROS topics using the prefix hw/wifi. -# Lib mnl +### Lib mnl This package is needed in hardware/wifi node, such that we can scan the wifi networks. If you are missing this package, and have sudo rights, do: diff --git a/astrobee/readme.md b/astrobee/readme.md index b561418c..89e60ac6 100644 --- a/astrobee/readme.md +++ b/astrobee/readme.md @@ -1,12 +1,12 @@ \page astrobee Astrobee -This folder contains the ISAAC additions to the Astrobee FSW +The Astrobeemodule contains the ISAAC additions to the Astrobee FSW -\subpage beh : High-level actions that support complex maneuvers. +\subpage beh : High-level actions that support complex maneuvers. The Inspection behavior facilitates data collection events (with options as geometric survey, panorama survey, volumetric survey or anomaly survey); the cargo transport behavior contains the pick and drop off cargo actions. -\subpage hw : Hardware drivers +\subpage hw : Sensor utilization, containing the WiFi reader. -\subpage sim : Gazebo plugins for simulation +\subpage sim : Gazebo plugins for simulation including acoustic and heat camera, RFID reader and emmiter, as well as WiFi reader and emmiters. 
-\subpage gs_action_helper : Guest science action helper \ No newline at end of file +\subpage gs_action_helper : Guest science action helper used to communicate with the ground through DDS messages \ No newline at end of file diff --git a/astrobee/simulation/acoustics_cam/readme.md b/astrobee/simulation/acoustics_cam/readme.md index ef4eb097..a57fc13e 100644 --- a/astrobee/simulation/acoustics_cam/readme.md +++ b/astrobee/simulation/acoustics_cam/readme.md @@ -1,8 +1,6 @@ \page acoustics_camera Acoustics camera -# Acoustics camera - -## Overview +### Overview This camera simulates a microphone array, or, in other words, a directional microphone. Its readings are assembled into a spherical @@ -19,7 +17,7 @@ from. The reading at a pixel of that camera is the value of the microphone measurement in the direction of the ray going from the microphone (and camera) center through that pixel. -## Installation +### Installation The acoustics camera depends on the pyroomacoustics package. This package can be installed together with its dependencies in a Python @@ -33,7 +31,7 @@ It would normally install itself in: $HOME/.local/lib/python2.7/site-packages/pyroomacoustics -## Running the acoustics camera +### Running the acoustics camera The acoustics camera ROS node can be run as part of the simulator as: @@ -68,7 +66,7 @@ mode it will only create a plot of the acoustics cam image. The sources of sounds will be represented as crosses in this plot, and the camera (microphone) position will be shown as a star. -## ROS communication +### ROS communication The acoustics camera subscribes to @@ -93,7 +91,7 @@ must use the app name "gov.nasa.arc.irg.astrobee.acoustics_cam_image" (which is the "s" field in the first command argument) for it to be processed. 
-## Configuration +### Configuration The behavior of this camera is described in diff --git a/astrobee/simulation/isaac_gazebo/readme.md b/astrobee/simulation/isaac_gazebo/readme.md index 3ca8975f..79661374 100644 --- a/astrobee/simulation/isaac_gazebo/readme.md +++ b/astrobee/simulation/isaac_gazebo/readme.md @@ -5,7 +5,7 @@ Simulation contains the packages where the dense maps are built This page describes the isaac gazebo plugins. -# Heat cam +### Heat cam This plugin simulates a heat-detecting camera, with the color in the images it produces suggestive of the temperature on the surface of the @@ -72,3 +72,5 @@ at a specific time, or to take pictures continuously. Such a command must use the app name "gov.nasa.arc.irg.astrobee.heat_cam_image" (which is the "s" field in the first command argument) for it to be processed. + +\subpage acoustics_camera \ No newline at end of file diff --git a/dense_map/readme.md b/dense_map/readme.md index ff4b89f1..b0600d31 100644 --- a/dense_map/readme.md +++ b/dense_map/readme.md @@ -1,8 +1,6 @@ \page idm Dense Map -## Overview - This cluster of nodes addresses the dense map capabilities, turning the sensor data into maps of the space. \subpage geometric_streaming_mapper diff --git a/dense_map/volumetric_mapper/readme.md b/dense_map/volumetric_mapper/readme.md index 7999979f..d682f031 100644 --- a/dense_map/volumetric_mapper/readme.md +++ b/dense_map/volumetric_mapper/readme.md @@ -1,12 +1,15 @@ \page volumetric_mapper Volumetric Mapper +Volumetric Mapper +============== - -## Overview +Overview +--------- The wifi mapper subscribes to all the wifi signal strength messages and prcesses them for 3D visualization. Customization os the produced maps is defined in isaac/config/dense_map/wifi_mapper.config -## Trace Map +Trace Map +--------- The trace map is drawn at a certain resolution, within this resolution, every signal received is averaged. The result is depicted using MarkerArray's where the cube size is the map resolution. 
@@ -15,7 +18,8 @@ plot_trace - enable or disable the trace map calculation resolution - resolution of the trace map where whithin it, the measurements will be averaged -## Wifi 3D Map +Wifi 3D Map +--------- The Wifi Mapper uses Gaussian Process Regression to mapp the ISS regarding the wifi signal strength. It makes use of the libgp library. When a new measure is obtained, the value is recorded in the GP object. When the timer responsible for calculating the wifi interpolation is enables, all the recorded data is processed. diff --git a/img_analysis/readme.md b/img_analysis/readme.md index f503a758..c9293ae4 100644 --- a/img_analysis/readme.md +++ b/img_analysis/readme.md @@ -1,11 +1,16 @@ \page ano Anomaly Detector -## Overview +Image Anomaly Detector +==================== + +Overview +--------- The Image anomaly detector contains a set of tools to analyse incoming images, using Convolutional Neural Networks, CNN's. To build, train and test the CNN's we use PyTorch. -## TorchLib +TorchLib +--------- This package is needed in the anomaly/img_analysis node, such that we can analyse the image, looking for anomalies. The first step is to download the LibTorch ZIP archive, the link might change, best to go to https://pytorch.org/ and select Linux->LibTorch->C++/Java @@ -23,7 +28,8 @@ To link the path, add this to your '$HOME/.bashrc' file: export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH:/path/to/libtorch/share/cmake/Torch -## Define and train the CNN +Define and train the CNN +--------- The python code containing the CNN definition and training is in resources/vent_cnn.py @@ -34,7 +40,8 @@ num_epochs - number of epochs to train, default 30 model_name - saved model name, default "model_cnn.pt" trace_model_name - saved traced model name, default "traced_model_cnn.pt" -### Get training data +Get training data +--------- To get training data, a tool is available which will read the poses from a vents file and others file. 
The tool will change the robot's pose and take pictures automatically. For the should be activated when the simulation is spawned like so (should be spawned in an undocked position such that the dock simulation does not interfere with the manual set of the pose): @@ -52,7 +59,8 @@ robot_dist - Robot's distance to vent, standard is 1m train_pics_per_vent - Number of pictures taken per vent/other for train data test_pics_per_vent - Number of pictures taken per vent/other for test data -## Test single picture +Test single picture +--------- There is a script, analyse_img.py, in the resources/ folder, which takes as argument the path of a picture taken with the sci_cam, processing it and outputing the classification result. This algorithm is useful to make sure that the C++ API for Pytorch is working properly. diff --git a/isaac/Subsystems.md b/isaac/Subsystems.md index b4f52361..4f327357 100644 --- a/isaac/Subsystems.md +++ b/isaac/Subsystems.md @@ -1,4 +1,4 @@ -# Subsystems +\page subsystems Subsystems The ISAAC Repo contains the following Modules: From b46fcdd7439bcc99925c28ad28c78b3c1ebae944 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Thu, 16 Dec 2021 11:30:42 -0800 Subject: [PATCH 13/40] fixing surveys; bump up isaac version (#9) --- RELEASE.md | 17 +++++++++++++++-- .../inspection/resources/scicam_panorama.txt | 2 ++ .../resources/soundsee_panorama.txt | 2 +- debian/changelog | 19 +++++++++++++------ isaac.doxyfile | 2 +- scripts/update_release.sh | 19 +++++++++++++++++++ 6 files changed, 51 insertions(+), 10 deletions(-) create mode 100644 astrobee/behaviors/inspection/resources/scicam_panorama.txt create mode 100755 scripts/update_release.sh diff --git a/RELEASE.md b/RELEASE.md index bda6f065..7e628ffd 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,9 +1,22 @@ # Releases +## Release 0.2.3 + + * Panorama picture taking mode supported + * Cargo transport simulation support + +## Release 0.2.2 + + * 
Compatible wit 0.14.x versions, made for ISAAC 3 activity + +## Release 0.2.1 + + * Compatible wit 0.14.x versions, made for ISAAC 2 activity + ## Release 0.2.0 - * Ready for ISAAC 1-3 activities + * Compatible wit 0.14.x versions, made for ISAAC 1 activity ## Release 0.1.0 - * Demo June 2020 + * Initial release. Demo June 2020. diff --git a/astrobee/behaviors/inspection/resources/scicam_panorama.txt b/astrobee/behaviors/inspection/resources/scicam_panorama.txt new file mode 100644 index 00000000..46e20d03 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/scicam_panorama.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.0 0 0 180 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_panorama.txt b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt index 46e20d03..d3dd47a1 100644 --- a/astrobee/behaviors/inspection/resources/soundsee_panorama.txt +++ b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt @@ -1,2 +1,2 @@ # Panorama -11 -9 5.0 0 0 180 \ No newline at end of file +11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index bea60adc..1ddfd3e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,26 +1,33 @@ +isaac (0.2.3) UNRELEASED; urgency=medium + + * Panorama picture taking mode supported + * Cargo transport simulation support + + -- Marina Moreira Mon, 13 Dec 2021 15:51:01 -0800 + isaac (0.2.2) testing; urgency=medium * Compatible wit 0.14.x versions, made for ISAAC 3 activity - -- Astrobee Flight Software Thu, 14 Apr 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 14 Apr 2021 10:37:25 -0800 isaac (0.2.1) testing; urgency=medium * Compatible wit 0.14.x versions, made for ISAAC 2 activity - -- Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 isaac (0.2.0) testing; urgency=medium - * Compatible wit 0.14.x versions + * Compatible wit 0.14.x versions, made for ISAAC 1 activity - -- 
Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 isaac (0.1.0) testing; urgency=medium - * Initial release + * Initial release. Demo June 2020. - -- Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 diff --git a/isaac.doxyfile b/isaac.doxyfile index 07bc0350..7634df31 100644 --- a/isaac.doxyfile +++ b/isaac.doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "ISAAC" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.1.0 +PROJECT_NUMBER = 0.2.3 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/scripts/update_release.sh b/scripts/update_release.sh new file mode 100755 index 00000000..2baf4bb3 --- /dev/null +++ b/scripts/update_release.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ $# -ne 1 ] + then + echo "Usage: $0 VERSION" + exit 0 +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +cd $DIR/.. 
+ +dch -c debian/changelog -v $1 + +sed -i -e "s/\# ISAAC Robot Software v1/\# ISAAC Robot Software v1\n\n\#\# Release $1\n\nINSERT DESCRIPTION HERE/g" RELEASE.md + +sed -i -e "s/^PROJECT_NUMBER.*/PROJECT_NUMBER = $1/g" isaac.doxyfile + +$EDITOR RELEASE.md From 43e81756297c13b98ab08cc86df7209e3184d39a Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Thu, 16 Dec 2021 11:32:52 -0800 Subject: [PATCH 14/40] fix release ci --- .github/workflows/ci_release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 05820f7d..d47ab4a8 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -2,7 +2,7 @@ name: Build, test and push packages CI on: push: - branches: [ 'release' ] + branches: [ 'master' ] jobs: From dd610ee7f726f37cfe391170a7af506fad426d97 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Wed, 22 Dec 2021 21:22:38 -0800 Subject: [PATCH 15/40] Make the geometry mapper and streaming mapper handle any camera (#11) * dense_map: Minor hardening of tools and documentation improvements * dense_map: Minor hardening of tools and documentation improvements * First cut at improved refiner * Make lines shorter in clang * More refiner work * Implement depth error * Implement triangulation * Complete setting up the problem * refiner2 add depth for ref cam * Match features using multiple threads * Match features using multiple threads * Refiner: Finish setting up the opt problem * Fix a nasty bug with precision loss in large timestamps * Minor tweak * Add tool to colorize bag images, important for testing support for color * Camera refiner2: Improved robustness * Bugfix for reading an image from a bag, copy to permanent storage * Geometry mapper: Support color throughout * clang-format: Make lines at most 100 characters, easier to manage shorter lines * Support the haz cam texturing in the geometry mapper * dense_map: Spellcheck and style * Fix python
style Fix python style * .clang-format: Make it back 120 lines * Rename process_bag to scale_bag * Make geometry_mapper.py accept any camera, present or future, and adjust the doc * Support arbitrary camera in geometry_mapper.cc * Minor adjustments to geometry_mapper.py * More work to use a general camera in the geometry mapper * geometry_mapper: Use a loop rather than three individual calls * More abstraction of the transforms * geometry_mapper: Finish moving to the general way of reading config info * image_picker: Let it use the new config reader * camera_refiner2: Update to the latest config format * Refiner: Use the new config file * camera_calibrator: Update to latest way of saving and reading config files * Minor formatting * Minor formatting * Finish using the new reader and writer everywhere * Make the streaming mapper and geom mapper handle better sim data and haz cam data * Make the streaming mapper and geom mapper handle better sim data and haz cam data * Add haz cam intensity to rviz, turned off by default * iss.rviz: Clarify caption for haz cam * acoustics_cam: Update the paths * Fixes for the geometry mapper with simulated data * Minor bugfix when not using haz cam texture * streaming mapper: Make exposure correction optional. 
Add sections to the doc * minor python formatting * dense_map: Minor harderning of tools and documentation improvements * Minor style adjustment for geometry_mapper.py * Minor wording adjustments in the doc * geometry_mapper: Cmake compile fix * Make the python linter happy --- astrobee/simulation/acoustics_cam/readme.md | 50 +- astrobee/simulation/isaac_gazebo/readme.md | 13 +- dense_map/geometry_mapper/CMakeLists.txt | 4 +- .../include/dense_map_ros_utils.h | 19 +- .../geometry_mapper/include/dense_map_utils.h | 45 +- dense_map/geometry_mapper/readme.md | 489 +++++++---- .../src/dense_map_ros_utils.cc | 7 + .../geometry_mapper/src/dense_map_utils.cc | 241 +++--- .../geometry_mapper/src/texture_processing.cc | 2 +- .../tools/camera_calibrator.cc | 269 +++--- .../geometry_mapper/tools/camera_refiner.cc | 136 ++- .../geometry_mapper/tools/camera_refiner2.cc | 268 ++---- .../tools/cameras_to_texrecon.py | 12 +- .../tools/extract_pc_from_bag.cc | 86 +- .../geometry_mapper/tools/geometry_mapper.cc | 774 ++++++++---------- .../geometry_mapper/tools/geometry_mapper.py | 257 +++--- .../geometry_mapper/tools/image_picker.cc | 76 +- .../tools/{process_bag.cc => scale_bag.cc} | 0 .../geometry_mapper/tools/streaming_mapper.cc | 335 ++++---- .../config/dense_map/streaming_mapper.config | 1 + isaac/resources/rviz/iss.rviz | 13 + 21 files changed, 1526 insertions(+), 1571 deletions(-) rename dense_map/geometry_mapper/tools/{process_bag.cc => scale_bag.cc} (100%) diff --git a/astrobee/simulation/acoustics_cam/readme.md b/astrobee/simulation/acoustics_cam/readme.md index a57fc13e..4f960a17 100644 --- a/astrobee/simulation/acoustics_cam/readme.md +++ b/astrobee/simulation/acoustics_cam/readme.md @@ -23,25 +23,29 @@ The acoustics camera depends on the pyroomacoustics package. 
This package can be installed together with its dependencies in a Python 2.7 environment using the command: - pip install numpy==1.15.4 scipy==0.18 pillow==6 PyWavelets==0.4.0 \ - networkx==1.8 matplotlib==2.0.0 scikit-image==0.14 \ - pyroomacoustics==0.3.1 + pip install numpy==1.15.4 scipy==0.18 pillow==6 PyWavelets==0.4.0 \ + networkx==1.8 matplotlib==2.0.0 scikit-image==0.14 \ + pyroomacoustics==0.3.1 It would normally install itself in: - $HOME/.local/lib/python2.7/site-packages/pyroomacoustics + $HOME/.local/lib/python2.7/site-packages/pyroomacoustics ### Running the acoustics camera -The acoustics camera ROS node can be run as part of the simulator as: +The acoustics camera ROS node can be run as part of the simulator. For that, +first set up the environment along the lines of: - source ~/astrobee_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ - pose:="11.2 -7.72 5.0 0 0 0 0 1" + export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src + export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ISAAC_WS=$HOME/isaac + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash -Here we assume that the astrobee build is in `~/astrobee_build` and that -the ISAAC build is in `~/projects/isaac`. +then run: + + roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ + pose:="11.2 -7.72 5.0 0 0 0 0 1" One must ensure that the "DEBUG: AcousticsCam" image checkbox is checked in RViz upon launch, to be able to visualize this image. If it @@ -50,17 +54,17 @@ should be saved from the RViz menu, and the simulation should be restarted. 
It is also possible to start the simulator without this camera, -and then launch it separately via: +and then launch this camera separately as: - source ~/astrobee_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch acoustics_cam acoustics_cam.launch output:=screen + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash + roslaunch acoustics_cam acoustics_cam.launch output:=screen The acoustics camera can be run without ROS as: - ~/projects/isaac/src/isaac/hardware/acoustics_cam/nodes/acoustics_cam debug_mode + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/nodes/acoustics_cam debug_mode -In that case, it assumes that the robot pose is the value set in the +In that case it assumes that the robot pose is the value set in the field "debug_robot_pose" in acoustics_cam.json (see below). In this mode it will only create a plot of the acoustics cam image. The sources of sounds will be represented as crosses in this plot, and the @@ -70,20 +74,20 @@ camera (microphone) position will be shown as a star. The acoustics camera subscribes to - /loc/truth/pose + /loc/truth/pose to get the robot pose. It publishes its image, camera pose, and camera intrinsics on topics: - /hw/cam_acoustics - /sim/acoustics_cam/pose - /sim/acoustics_cam/info + /hw/cam_acoustics + /sim/acoustics_cam/pose + /sim/acoustics_cam/info By default, the camera takes pictures as often as it can (see the configuration below), which is rarely, in fact, as it is slow. It listens however to the topic - /comm/dds/command + /comm/dds/command for guest science commands that may tell it to take a single picture at a specific time, or to take pictures continuously. Such a command @@ -95,7 +99,7 @@ processed. 
The behavior of this camera is described in - isaac/hardware/acoustics_cam/acoustics_cam.json + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/acoustics_cam.json It has the following entries: diff --git a/astrobee/simulation/isaac_gazebo/readme.md b/astrobee/simulation/isaac_gazebo/readme.md index 79661374..4a9fd09f 100644 --- a/astrobee/simulation/isaac_gazebo/readme.md +++ b/astrobee/simulation/isaac_gazebo/readme.md @@ -52,10 +52,17 @@ This camera publishes the RGB heat image, its pose and intrinsics, on topics: /sim/heat_cam/pose /sim/heat_cam/info -To launch the simulator and see the heat map, one can do: +To launch the simulator and see the heat map, first set up the environment. +The paths below may need to be adjusted for your system. + + export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src + export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ISAAC_WS=$HOME/isaac + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash + +Then run: - source ~/freeflyer_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh roslaunch isaac sim.launch world:=iss rviz:=true \ pose:="11.2 -7.72 5.0 0 0 0 0 1" diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 6823fdb6..02c2a76d 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -187,8 +187,8 @@ add_executable(add_sci_cam_to_bag tools/add_sci_cam_to_bag.cc) target_link_libraries(add_sci_cam_to_bag geometry_mapper_lib gflags glog) -add_executable(process_bag tools/process_bag.cc) -target_link_libraries(process_bag +add_executable(scale_bag tools/scale_bag.cc) +target_link_libraries(scale_bag geometry_mapper_lib gflags glog) add_executable(colorize_bag_images tools/colorize_bag_images.cc) diff --git a/dense_map/geometry_mapper/include/dense_map_ros_utils.h b/dense_map/geometry_mapper/include/dense_map_ros_utils.h index bd89a9c0..c9c1bf9a 100644 --- 
a/dense_map/geometry_mapper/include/dense_map_ros_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_ros_utils.h @@ -40,6 +40,13 @@ #include #include +// Forward declaration +namespace pcl { + class PointXYZ; + template + class PointCloud; +} + namespace dense_map { // Publish a given pose @@ -72,8 +79,13 @@ bool lookupImage(double desired_time, std::vector const // repeated calls to this function we always travel forward in time, // and we keep track of where we are in the bag using the variable // bag_pos that we update as we go. -bool lookupCloud(double desired_time, std::vector const& bag_msgs, double max_time_diff, - cv::Mat& cloud, int& bag_pos, double& found_time); +bool lookupCloud(double desired_time, std::vector const& bag_msgs, + double max_time_diff, cv::Mat& cloud, int& bag_pos, double& found_time); + +// A wrapper around a function in pcl_ros/point_cloud.h to avoid +// including that header all over the place as it creates an annoying +// warning. +void msgToPcl(sensor_msgs::PointCloud2::ConstPtr pc_msg, pcl::PointCloud & pc); // Read the list of topics in a bag while avoiding repetitions void readTopicsInBag(std::string const& bag_file, std::vector& topics); @@ -81,12 +93,13 @@ void readTopicsInBag(std::string const& bag_file, std::vector& topi // A small struct in which to store an opened ROS bag and the vector of its messages // that we will use later to quickly navigate through it while going forward in time. 
struct RosBagHandle { + RosBagHandle() = delete; // The rosbag API prevents anything else than initialization RosBagHandle(std::string const& bag_file, std::string const& topic) { + bag_msgs.clear(); bag.open(bag_file, rosbag::bagmode::Read); std::vector topics; topics.push_back(topic); view = boost::shared_ptr(new rosbag::View(bag, rosbag::TopicQuery(topics))); - bag_msgs.clear(); for (rosbag::MessageInstance const m : *view) bag_msgs.push_back(m); } rosbag::Bag bag; diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 24b03d0c..8be7dff1 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -74,18 +74,30 @@ std::string matType(cv::Mat const& mat); // Read the transform from depth to given camera void readCameraTransform(config_reader::ConfigReader& config, std::string const transform_str, - Eigen::MatrixXd& transform); - -// Read a bunch of transforms from the robot calibration file -void readConfigFile(std::string const& navcam_to_hazcam_timestamp_offset_str, - std::string const& scicam_to_hazcam_timestamp_offset_str, - std::string const& hazcam_to_navcam_transform_str, - std::string const& scicam_to_hazcam_transform_str, std::string const& navcam_to_body_transform_str, - std::string const& hazcam_depth_to_image_transform_str, double& navcam_to_hazcam_timestamp_offset, - double& scicam_to_hazcam_timestamp_offset, Eigen::MatrixXd& hazcam_to_navcam_trans, - Eigen::MatrixXd& scicam_to_hazcam_trans, Eigen::MatrixXd& navcam_to_body_trans, - Eigen::Affine3d& hazcam_depth_to_image_transform, camera::CameraParameters& nav_cam_params, - camera::CameraParameters& haz_cam_params, camera::CameraParameters& sci_cam_params); + Eigen::Affine3d& transform); + +// Read some transforms from the robot calibration file +void readConfigFile // NOLINT +(// Inputs // NOLINT + std::vector const& cam_names, // NOLINT + std::string const& 
nav_cam_to_body_trans_str, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + // Outputs // NOLINT + std::vector & cam_params, // NOLINT + std::vector & nav_to_cam_trans, // NOLINT + std::vector & nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d & nav_cam_to_body_trans, // NOLINT + Eigen::Affine3d & haz_cam_depth_to_image_trans); // NOLINT + +// Save some transforms from the robot calibration file. This has some very fragile +// logic and cannot handle comments in the config file. +void updateConfigFile // NOLINT +(std::vector const& cam_names, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + std::vector const& cam_params, // NOLINT + std::vector const& nav_to_cam_trans, // NOLINT + std::vector const& nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d const& haz_cam_depth_to_image_trans); // NOLINT // Given two poses aff0 and aff1, and 0 <= alpha <= 1, do linear interpolation. Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::Affine3d const& aff1); @@ -138,15 +150,6 @@ double fileNameToTimestamp(std::string const& file_name); // Create a directory unless it exists already void createDir(std::string const& dir); -// Modify in-place the robot config file -void update_config_file(bool update_cam1, std::string const& cam1_name, - boost::shared_ptr cam1_params, bool update_cam2, - std::string const& cam2_name, boost::shared_ptr cam2_params, - bool update_depth_to_image_transform, Eigen::Affine3d const& depth_to_image_transform, - bool update_extrinsics, Eigen::Affine3d const& cam2_to_cam1_transform, - bool update_timestamp_offset, std::string const& cam1_to_cam2_timestamp_offset_str, - double cam1_to_cam2_timestamp_offset); - // A little holding structure for nav, sci, and haz poses struct CameraPoses { std::map haz_depth_to_image_timestamps; diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 84dfa173..38031f8d 100644 --- 
a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -75,24 +75,8 @@ about which of the two paradigms one refers to. ## Additional sensors present only in simulation A number of robot sensors are present only as part of the simulator. -Those are the heat camera, described in: +This is discussed further down the document. - $ISAAC_WS/src/astrobee/simulation/gazebo/readme.md - -and the acoustics camera, documented at - - $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md - -These tools are implemented and documented in different places -in the source tree because the first one is a gazebo plugin -while the second one is a Python node. - -See also - - $ASTROBEE_SOURCE_PATH/simulation/readme.md - -for information about the simulated nav_cam, haz_cam, and sci_cam. - ## Compiling the software It is assumed that by now the Astrobee and ISAAC software is compiled. @@ -149,7 +133,9 @@ experimental remeshing tool. ### With real data -Acquire a bag of data on the bot as follows. +Acquire a bag of data on the bot. The current approach is to use a +recording profile. A step-by-step procedure is outlined below if a +recording profile has not been set up. First give the bot the ability to acquire intensity data with the depth camera (haz_cam). For that, connect to the MLP processor of the @@ -209,7 +195,15 @@ Copy the resulting bag off the robot. ### With simulated data -Edit the simulation configuration, in +#### Cameras present in simulation + +The astrobee simulator supports a handful of cameras, including +``nav_cam``, ``sci_cam``, ``haz_cam``, ``heat_cam``, +``acoustics_cam``, etc. All these have been tested with the gemetry +mapper and streaming mapper. + +The ``sci_cam`` and ``haz_cam`` cameras are not enabled by default in +the simulator. 
To enable them, edit the simulation configuration, in $ASTROBEE_SOURCE_PATH/astrobee/config/simulation/simulation.config @@ -220,32 +214,66 @@ and set: sci_cam_continuous_picture_taking = true; The later will make sure sci cam pictures are taken automatically. If -custom behavior is desired, see +custom behavior is desired, see: $ISAAC_WS/src/astrobee/behaviors/inspection/readme.md +More information about the simulated nav_cam, haz_cam, and sci_cam is +at: + + $ASTROBEE_SOURCE_PATH/simulation/readme.md + +The heat camera is described in: + + $ISAAC_WS/src/astrobee/simulation/gazebo/readme.md + +The acoustics camera and how to enable it is documented at: + + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md + +#### Recording simulated data + Start the simulator, such as: source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash - roslaunch isaac sim.launch rviz:=true pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss + roslaunch isaac sim.launch rviz:=true dds:=false \ + pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss -In RVIZ turn on visualizing these cameras from the Panels menu. +In rviz turn on visualizing the desired cameras cameras from the +Panels menu, as otherwise the needed camera topics may not be +published. -When recording simulated data, need to also save the camera poses and -camera information, which is different than how we do for real data, -when these quantities must be estimated by algorithms. Hence the -record command to run is: +Adjust the 3D view in rviz with the mouse so that the robot, which is +present in the middle of the module, can be seen. 
- rosbag record /hw/depth_haz/points \ - /hw/depth_haz/extended/amplitude_int /hw/cam_sci \ - /sim/haz_cam/pose /sim/sci_cam/pose /sim/sci_cam/info +When recording simulated data for a given camera, for example, for +``sci_cam``, so that later it can be used with the geometry and +streaming mapper, one needs to record, in addition to depth clouds and +camera images, also the camera poses and intrinsics information, which +is done as follows: -Note that we record no nav cam data at all since that one is needed -only for computing camera poses that are pre-computed in -simulation. Also the sci cam topic changed, compared to the earlier -record command, as the simulator returns uncompressed images (they are -grayscale as well, unlike for the real sci cam). + rosbag record /hw/depth_haz/points \ + /hw/cam_sci/compressed /sim/sci_cam/pose /sim/sci_cam/info + +It is good to check for the precise name of the camera image topic. +For haz cam the image topic will be instead: + + /hw/depth_haz/extended/amplitude_int + +For nav cam, the image topic will be: + + /hw/cam_nav + +and the same convention is followed for the rest of the cameras. + +If desired to record data for many cameras, these topics must +be specified for each of them. For example, for ``heat_cam`` add +the lines: + + /hw/cam_heat /sim/heat_cam/pose /sim/heat_cam/info + +to the ``rosbag record`` command. The robot can be told to move around by either running a plan, or by sending it a move command, such as: @@ -302,14 +330,18 @@ the fact that the calibration was done at reduced resolution. 
To accomplish this processing, once the sci cam data is integrated into the bag, one can do the following: - $ISAAC_WS/devel/lib/geometry_mapper/process_bag -input_bag input.bag \ - -output_bag output.bag --image_type grayscale --scale 0.25 + $ISAAC_WS/devel/lib/geometry_mapper/scale_bag --input_bag input.bag \ + --output_bag output.bag --image_type grayscale --scale 0.25 Note that the processed sci cam images will be now on topic `/hw/cam_sci2`. ## Camera calibration +Currently the calibrator solution is not that accurate. It is suggested +to use instead camera_refiner (see further down) on a bag acquired +without a calibration target. + Camera calibration is an advanced topic. Likely your robot's cameras have been calibrated by now, and then this step can be skipped. @@ -342,12 +374,18 @@ builds upon the instructions used in the doc referenced right above.) source $KALIBR_WS/devel/setup.bash rosrun kalibr kalibr_calibrate_cameras \ - --topics /hw/cam_nav /hw/depth_haz/extended/amplitude_int /hw/cam_sci2 \ + --topics /mgt/img_sampler/nav_cam/image_record \ + /hw/depth_haz/extended/amplitude_int \ + /hw/cam_sci2 \ --models pinhole-fov pinhole-radtan pinhole-radtan \ --target $ASTROBEE_SOURCE_PATH/scripts/calibrate/config/granite_april_tag.yaml \ --bag calibration.bag --dont-show-report --verbose \ --target_corners_dirs calib_nav calib_haz calib_sci +Note that above we assume that the image sampler was used to collect a +subset of the nav cam images. Otherwise the nav cam topic would be +/hw/cam_nav. + This will create three directories with the corners extracted from the nav, haz, and sci cameras. @@ -557,19 +595,15 @@ down this document, in the section on camera refinement. 
Nav cam images can be extracted from a bag as follows: $ASTROBEE_BUILD_PATH/devel/lib/localization_node/extract_image_bag \ - mydata.bag -image_topic /hw/cam_nav -output_directory nav_data \ - -use_timestamp_as_image_name + mydata.bag -image_topic /mgt/img_sampler/nav_cam/image_record \ + -output_directory nav_data -use_timestamp_as_image_name The last option, `-use_timestamp_as_image_name`, must not be missed. It makes it easy to look up the image acquisition based on image name, and this is used by the geometry mapper. -Note that bags acquired on the ISS usually have the nav cam image topic -as: - - /mgt/img_sampler/nav_cam/image_record - -Then the command above needs to be adjusted accordingly. +One should check beforehand if the nav cam topic is correct. If the image +sampler was not used, the nav cam topic would be /hw/cam_nav. To extract the sci cam data, if necessary, do: @@ -625,9 +659,13 @@ sci cam images with the geometry mapper. ## When using real data -The geometry mapper can handle both color and grayscale images, and -both at full and reduced resolution. (With the sci cam topic name -being different at reduced resolution.) +The geometry mapper fuses the depth cloud data and creates textures +from the image cameras. + +Any image camera is supported, as long as present in the robot +configuration file and a topic for it is in the bag file (see more +details further down). The geometry mapper can handle both color and +grayscale images, and, for sci cam, both full and reduced resolution. Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, `ASTROBEE_BUILD_PATH`, and `ISAAC_WS` as earlier. Run: @@ -641,11 +679,10 @@ Ensure that the bot name is correct below. 
Set `ASTROBEE_SOURCE_PATH`, python $ISAAC_WS/src/dense_map/geometry_mapper/tools/geometry_mapper.py \ --ros_bag data.bag \ --sparse_map nav_data.map \ - --nav_cam_topic /hw/cam_nav \ + --camera_types "sci_cam nav_cam haz_cam" \ + --camera_topics "/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record /hw/depth_haz/extended/amplitude_int"\ + --undistorted_crop_wins "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160" \ --haz_cam_points_topic /hw/depth_haz/points \ - --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \ - --sci_cam_topic /hw/cam_sci/compressed \ - --camera_type all \ --start 0 \ --duration 1e+10 \ --sampling_spacing_seconds 5 \ @@ -671,14 +708,20 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, Parameters: - --ros_bag: A ROS bag with recorded nav_cam, haz_cam, and sci_cam data. + --ros_bag: A ROS bag with recorded image and point cloud data. --sparse_map: A registered SURF sparse map of a subset of the nav_cam data. - --nav_cam_topic: The nav cam image topic in the bag file. - --haz_cam_points_topic: The haz cam point cloud topic in the bag file. - --haz_cam_intensity_topic: The haz cam intensity topic in the bag file. - --sci_cam_topic: The sci cam image topic in the bag file. - --camera_type: Specify which cameras to process. Options: all (default), - nav_cam, and sci_cam (with simulated data only sci_cam is supported). + --camera_types: Specify the cameras to use for the textures, as a list + in quotes. Default: "sci_cam nav_cam haz_cam". With simulated + data only ``sci_cam`` is supported. + --camera_topics: Specify the bag topics for the cameras to texture + (in the same order as in ``--camera_types``). Use a list in quotes. + The default is in the usage above. + --undistorted_crop_wins: The central region to keep after + undistorting an image and before texturing. For sci_cam the + numbers are at 1/4th of the full resolution and will be adjusted + for the actual input image dimensions. 
Use a list in quotes. The + default is "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160". + --haz_cam_points_topic: The depth point cloud topic in the bag file. --start: How many seconds into the bag to start processing the data. --duration: For how many seconds to do the processing. --sampling_spacing_seconds: How frequently to sample the sci and haz @@ -767,8 +810,8 @@ Parameters: registered map. --external_mesh: Use this mesh to texture the images, rather than creating one from depth data in the current bag. - --scicam_to_hazcam_timestamp_offset_override_value: Override the - value of scicam_to_hazcam_timestamp_offset from the robot config + --nav_cam_to_sci_cam_offset_override_value: Override the + value of nav_cam_to_sci_cam_timestamp_offset from the robot config file with this value. --verbose: If specified, echo all output in the terminal. --save_debug_data: If specified, save many intermediate datasets @@ -782,10 +825,15 @@ with Meshlab: - data_dir/hole_filled_mesh.ply: A mesh obtained by filling holes in the fused mesh. - data_dir/clean_mesh.ply: The mesh with small connected components removed. - data_dir/smooth_mesh2.ply: A further smoothed version of the mesh. + - data_dir/hole_filled_mesh2.ply: A further hole-filled mesh. - data_dir/simplified_mesh.ply: The simplified mesh. + - data_dir/smooth_mesh3.ply: A further smoothed version of the mesh. - data_dir/nav_cam_texture/run.obj: The mesh overlayed with the nav cam texture. - data_dir/sci_cam_texture/run.obj: The mesh overlayed with the sci cam texture. +(Several passes of smoothing and hole-filling, as done above, appear +necessary from experiments.) + Intermediate products are the undistorted nav cam and sci cam images. It is suggested to review those in an image viewer, such as 'eog' and delete some of them if they cause artifacts. 
After that, the last part
@@ -800,7 +848,7 @@
restricted to the range of timestamps contained within the sparse map
(this is another restriction, in addition to `--start` and
`--duration`).

-If this tool is too slow, or if localization fails, consider tweaking
+If this tool is too slow, or if localization fails, consider adjusting
the --localization_options above. For example, to make localization
work better (which will make the program slower) decrease the value of
--default_surf_threshold, and increase --early_break_landmarks,
@@ -828,35 +876,68 @@ with the option --external_mesh.

The most time-consuming part of the geometry mapper is computing the
initial poses, which is the earliest step, or step 0. To resume the
-geometry mapper at any step, use the option '--start_step num', where
-0 <= num and num <= 7. For example, one may want to apply further
-smoothing to the mesh or more hole-filling, before resuming with the
-next steps.
+geometry mapper at any step, use the option '--start_step num'. For
+example, one may want to apply further smoothing to the mesh or more
+hole-filling, before resuming with the next steps.
+
+For a given camera type to be textured it must have entries in
+``cameras.config`` and the robot config file (such as
+``bumble.config``), which are analogous to existing
+``nav_cam_to_sci_cam_timestamp_offset``,
+``nav_cam_to_sci_cam_transform``, and ``sci_cam`` intrinsics, with
+"sci" replaced by your camera name. The geometry mapper arguments
+``--camera_types``, ``--camera_topics``, and
+``--undistorted_crop_wins`` must be populated accordingly, with some
+careful choice to be made for the last one. Images for the desired
+camera must be present in the bag file at the specified topic.

## With simulated data

-When working with simulated data, the flag
-
- --simulated_data
+The geometry mapper works with any simulated cameras not having
+distortion.
It was tested to work with simulated images for +``sci_cam``, ``haz_cam``, ``heat_cam``, and ``acoustics_cam``. It does +not work with ``nav_cam``, which has distortion. -should be passed to this tool. Also, the sci cam image topic option -should be: +The flag: - --sci_cam_topic /hw/cam_sci + --simulated_data -It is not necessary to produce or pass in a sparse map with simulated -data. The nav cam and the camera information provided by the value of -`ASTROBEE_ROBOT` will be ignored. Instead, camera poses and -information will be read from +should be passed to the geometry mapper. The sparse map is not +necessary, no localization will take place, and intrinsics +information, camera transform, and timestamp offset will not be read +from the robot configuration file. Instead, it is expected that each +simulated camera, for example ``sci_cam``, will provide, in addition to an +image topic, the topics - /sim/haz_cam/pose /sim/sci_cam/pose /sim/sci_cam/info -For this operation it is suggested to pick a portion of the bag where -the images face the wall as much as possible, so one may need to -change the `-start` and `-duration` values. That will result in the -best possible output. +having the pose and intrinsics of each camera image. These should be +recorded in the bag (see more on recording earlier in the document). + +It is assumed that the simulated images are not distorted. In particular, +``nav_cam``, which has fisheye lens distortion, is not supported. + +The simulated haz_cam is required to be among the topics being recorded +and read in, because its pose is needed to process the depth clouds. 
+
+Example of running the geometry mapper with simulated data:
+
+ sci_topic=/hw/cam_sci/compressed
+ haz_topic=/hw/depth_haz/extended/amplitude_int
+ python $ISAAC_WS/src/dense_map/geometry_mapper/tools/geometry_mapper.py \
+ --simulated_data \
+ --ros_bag data.bag \
+ --camera_types "sci_cam haz_cam" \
+ --camera_topics "$sci_topic $haz_topic" \
+ --haz_cam_points_topic /hw/depth_haz/points \
+ --output_dir data_dir \
+ --sampling_spacing_seconds 2 \
+ --dist_between_processed_cams 0.1 \
+ --verbose
+
+It is important to check that the correct names for the camera image
+topics are passed to ``--camera_topics``.

## Running the streaming mapper

@@ -872,6 +953,8 @@ obtained textured model to be visualized.

### Running with real data

+#### When the robot (or nav cam) poses are known
+
To run the streaming mapper with real data for the given bot, do:

source $ASTROBEE_BUILD_PATH/devel/setup.bash
@@ -883,145 +966,207 @@ To run the streaming mapper with real data for the given bot, do:
roslaunch isaac isaac_astrobee.launch llp:=disabled nodes:=framestore \
streaming_mapper:=true output:=screen

-This will load the mesh produced by the geometry mapper from
+Wait until it finishes forming the texture model, which may take
+30 seconds or more.
+
+Ensure that the ASTROBEE_ROBOT name is correct above.
+
+This node will load the mesh produced by the geometry mapper from

$ISAAC_WS/src/isaac/resources/geometry/${ASTROBEE_WORLD}.ply

+If an updated geometry mapper mesh exists, it should be copied to that
+location first.
+
It will listen to the robot body pose being published at:

/gnc/ekf

-and the sci cam image data at:
+The tool will read the configuration options from:
+
+ $ISAAC_WS/src/isaac/config/dense_map/streaming_mapper.config
+
+which specifies the camera image to texture.
The image topic must be set,
+which, for ``sci_cam`` is normally

/hw/cam_sci/compressed

+while for ``haz_cam`` it is:
+
+ /hw/depth_haz/extended/amplitude_int
+
+and for other cameras, such as ``nav_cam``, etc., it is of form:
+
+ /hw/cam_nav
+
+For the sci cam, it also expects image exposure data on the topic:
+
/hw/sci_cam_exif

-It will publish the the texture on the topics:
+to do exposure correction, unless in simulation mode or
+``sci_cam_exposure_correction`` is set to false. This is not needed
+for other cameras.
+
+The streaming mapper will publish several topics having texture
+information, which for ``sci_cam`` are:

/ism/sci_cam/img
/ism/sci_cam/obj
/ism/sci_cam/mtl

-(when the texture type is sci_cam, and this is customizable, see
-below).
+and an analogous convention is followed for other cameras.
+
+In order for the streaming mapper to produce texture it should
+receive input information with its needed topics, either from a bag or
+from the robot in real time.

The header.stamp value for each published message will be the same as
-the header.stamp for the corresponding input sci cam image.
+the header.stamp for the corresponding input camera image.

-Next one can play a bag having the input topics expected by the robot.
-(See the note below on perhaps redirecting the nav cam topic if the
-localization node is necessary.)
+The data produced by the streaming mapper can be recorded with:

-Additional configuration option for this tool can be specified in
+ rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \
+ -b 10000
+
+The recording should start before the input bag is played. The `-b`
+option tells ROS to increase its recording buffer size, as sometimes
+the streaming mapper can publish giant meshes.
- $ISAAC_WS/src/isaac/config/dense_map/streaming_mapper.config +The robot pose that the streaming mapper needs assumes a very accurate +calibration of the IMU sensor in addition to the nav, haz, and sci cam +sensors, and very accurate knowledge of the pose of these sensors on +the robot body. If that is not the case, it is suggested to use the +nav cam pose via the ``nav_cam_pose_topic`` field in +streaming_mapper.config, for which only accurate calibration of the +nav, sci, and haz cameras among each other is assumed. -which has the following fields: +The input texture can be in color or grayscale, at full or reduced +resolution, and compressed or not. - - mesh_file: Override the location of the mesh described earlier. - - ekf_state_topic: The topic to listen to for robot pose information. - It can be set to /gnc/ekf, whose type is ff_msgs::EkfState. - - ekf_pose_topic: An alternative topic for the robot pose. It can - be set to /loc/pose, of type geometry_msgs::PoseStamped. - - nav_cam_pose_topic: The preferred topic for the pose. This - is the pose of the nav cam, not of the robot, and can be set to - /loc/ml/features. It is published by the localization node. - - texture_cam_type: The camera that creates the texture images - (can be nav_cam, sci_cam, haz_cam, and in simulation also - heat_cam, and acoustics_cam). The default is sci_cam. This - field affects the name of the topics on which the streaming - mapper publishes its output. - - texture_cam_topic: The topic having the texture to overlay. - The default value is /hw/cam_sci/compressed. - - dist_between_processed_cams: Once an image is textured and - published, how far the camera should move (in meters) before - it should process another texture (any images arriving - in between will be ignored). The default is 0.1 meters. 
- - max_iso_times_exposure: Apply the inverse gamma transform to - images, multiply them by max_iso_times_exposure/ISO/exposure_time - to adjust for lightning differences, then apply the gamma - transform back. This value should be set to the maximum observed - ISO * exposure_time. The default is 5.1. Not used with simulated - data. - - use_single_texture: Use a single texture buffer. Sample - the images by picking points on each triangle face with spacing - pixel_size. This can take a couple of minutes to form the - necessary structures to be able to stream the texture. - - pixel_size: The pixel size to use with use_single_texture. - The default is 0.001 meters. - - num_threads: Number of threads to use. The default is 48. - - save_to_disk: If to save to disk an .obj file for each topic - published, to be debugged in Meshlab. These will be saved in - ~/.ros. The default is 'false'. +#### Running with no robot or nav cam pose information -If no pose information is present, for example, if the EKF or -localization node was not running when the data was acquired, or this -data was not recorded, the localization node can be started by +If no robot body or nav cam pose information is present, for example, +if the EKF or localization node was not running when the image data was +acquired, or this data was not recorded or was not reliable, the +localization node can be started together with the streaming mapper by modifying the above `roslaunch` command as follows: + export ASTROBEE_ROBOT=bsharp2 roslaunch isaac isaac_astrobee.launch llp:=disabled \ nodes:=framestore,localization_node \ streaming_mapper:=true output:=screen -Alternatively, the streaming mapper can be started first, -without localization, such as: +and then enabled by running in a separate terminal: - roslaunch streaming_mapper.launch sim_mode:=false output:=screen + rosservice call /loc/ml/enable true -(the full path to streaming_mapper.launch needs to be specified if not -found). 
+Alternatively, the streaming mapper can be started first, +without localization, as: + + export ASTROBEE_WORLD=iss + export ASTROBEE_ROBOT=bsharp2 # the robot name should be checked + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash + export ISAAC_RESOURCE_DIR=${ISAAC_WS}/src/isaac/resources + export ISAAC_CONFIG_DIR=${ISAAC_WS}/src/isaac/config + roslaunch $ISAAC_WS/src/dense_map/geometry_mapper/launch/streaming_mapper.launch \ + sim_mode:=false output:=screen Ensure then that nav_cam_pose_topic is set to /loc/ml/features in -streaming_mapper.config and that ekf_state_topic is empty. +streaming_mapper.config and that ekf_state_topic is empty. Also +check the robot name, as earlier. Then the localization node can be started separately, as: + export ASTROBEE_ROBOT=bsharp2 roslaunch astrobee astrobee.launch llp:=disabled \ nodes:=framestore,localization_node output:=screen -and then enabled by running in a separate terminal: - - rosservice call /loc/ml/enable true +with the same environment, including the robot name, and then can be +enabled with a rosservice call as earlier. This node will expect the nav cam pose to be published on topic -`/hw/cam_nav`. If it is on a different topic, such as -`/mgt/img_sampler/nav_cam/image_record`, it needs to be redirected to -this one when playing the bag, such as: +``/hw/cam_nav``. If it is on a different topic, such as +``/mgt/img_sampler/nav_cam/image_record``, it needs to be redirected +to this one when playing the bag, such as: - rosbag play data.bag \ - /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav + rosbag play data.bag \ + /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav \ + /loc/ml/features:=/tmp/features +Above the /loc/ml/features topic which may exist in the bag is +redirected to /tmp/features since the currently started localization +node will create new such features. 
+ The localization node, if needed, will make use of a registered sparse map with BRISK features, histogram equalization, and a vocabulary -database to find the nav cam poses. See build_map.md in the astrobee -repository how such a map can be created and where it should be -copied. If running on a local machine, it should go to: +database to find the nav cam poses. The command for building such a +BRISK map from a registered SURF map is: + + cp surf_map.map brisk_map.map + $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/build_map \ + --output_map brisk_map.map --rebuild --histogram_equalization \ + --vocab_db + +See: + + $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md + +for more information. + +If running on a local machine, after the map is rebuilt it should be +copied to: $ASTROBEE_RESOURCE_DIR/maps/${ASTROBEE_WORLD}.map +(The ``maps`` directory needs to be created if missing.) + Also see a note earlier in the text for how to reduce the BRISK thresholds if the map has too few features. For more details on what the localization node expects, see build_map.md, in the section about running this node on a local machine. -The data produced by the streaming mapper can be recorded with: +#### The streaming mapper configuration file - rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \ - -b 10000 - -The recording should start before the input bag is played. The `-b` -option tells ROS to increase its recording buffer size, as sometimes -the streaming mapper can publish giant meshes. - -The robot pose that the streaming mapper needs assumes a very accurate -calibration of the IMU sensor in addition to the nav, haz, and sci cam -sensors, and very accurate knowledge of the pose of these sensors on -the robot body. If that is not the case, it is suggested to use the nav -cam pose via the nav_cam_pose_topic field, for which only accurate -calibration of the nav, sci, and haz cameras among each other is assumed. 
+The ``streaming_mapper.config`` file has following fields: -The input texture can be in color or grayscale, at full or reduced -resolution, and compressed or not. + - mesh_file: Override the location of the mesh described earlier. + - ekf_state_topic: The topic to listen to for robot pose information. + It can be set to /gnc/ekf, whose type is ff_msgs::EkfState. + - ekf_pose_topic: An alternative topic for the robot pose. It can + be set to /loc/pose, of type geometry_msgs::PoseStamped. + - nav_cam_pose_topic: The preferred topic for the pose. This + is the pose of the nav cam, not of the robot, and can be set to + /loc/ml/features. It is published by the localization node. + - texture_cam_type: The camera that creates the texture images + (can be nav_cam, sci_cam, haz_cam, and in simulation also + heat_cam, and acoustics_cam). The default is sci_cam. This + field affects the name of the topics on which the streaming + mapper publishes its output. + - texture_cam_topic: The topic having the texture to overlay. + The default value is /hw/cam_sci/compressed. + - dist_between_processed_cams: Once an image is textured and + published, how far the camera should move (in meters) before + it should process another texture (any images arriving + in between will be ignored). The default is 0.1 meters. + - max_iso_times_exposure: Apply the inverse gamma transform to + images, multiply them by max_iso_times_exposure/ISO/exposure_time + to adjust for lightning differences, then apply the gamma + transform back. This value should be set to the maximum observed + ISO * exposure_time. The default is 5.1. Not used with simulated + data or when sci_cam_exposure_correction if false. + - sci_cam_exposure_correction: If set to 'true', correct the + exposure of sci_cam images. + - use_single_texture: If set to 'true', use a single texture + buffer. Sample the images by picking points on each triangle face + with spacing pixel_size. 
This can take a couple of minutes to form + the necessary structures to be able to stream the texture. + - pixel_size: The pixel size to use with use_single_texture. + The default is 0.001 meters. + - num_threads: Number of threads to use. The default is 48. + - save_to_disk: If set to 'true', save to disk an .obj file for each + topic published, to be debugged in Meshlab. These will be saved in + ~/.ros. The default is 'false'. ## Running with simulated data @@ -1037,10 +1182,10 @@ To launch the streaming mapper, one can do the following: export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss - roslaunch isaac sim.launch world:=iss rviz:=true streaming_mapper:=true \ - pose:="11.0 -7.0 5.0 0 0 0 1" + roslaunch isaac sim.launch world:=iss rviz:=true \ + streaming_mapper:=true pose:="11.0 -7.0 5.0 0 0 0 1" -and then the robot can be moved in order to create images to texture as: +Then the robot can be moved in order to create images to texture as: rosrun mobility teleop -move -pos "11.0 -5.0 5.0" -tolerance_pos 0.0001 \ -att "0 0 0 1" @@ -1103,7 +1248,7 @@ camera's time is first adjusted for the time offset before any of this happens.) One may consider using a bracket length of 1.0 seconds if the bot is known to move quickly right after taking a sci cam picture. -The option --scicam_to_hazcam_timestamp_offset_override_value can be +The option --nav_cam_to_sci_cam_offset_override_value can be used if the given bag is suspected to have a different value of this offset. Such an option the option can be passed also to the camera refiner below and to the geometry mapper. @@ -1233,11 +1378,11 @@ Note how we used the same bracket length as in the image picker. This tool will print some statistics showing the reprojection residuals for all camera combinations. 
This can be helpful -in figuring out if the value of scicam_to_hazcam_timestamp_offset +in figuring out if the value of nav_cam_to_sci_cam_timestamp_offset is correct. If this value is not known well, this tool can be run with zero or more iterations and various values of - --scicam_to_hazcam_timestamp_offset_override_value + --nav_cam_to_sci_cam_offset_override_value to see which value gives the smallest residuals. The geometry mapper cam be run with various obtained calibrated files, and see which @@ -1322,8 +1467,8 @@ This program's options are: option --fix_map is used). --num_opt_threads: How many threads to use in the optimization. The default is 16. - --scicam_to_hazcam_timestamp_offset_override_value: - Override the value of scicam_to_hazcam_timestamp_offset from the + --nav_cam_to_sci_cam_offset_override_value: + Override the value of nav_cam_to_sci_cam_timestamp_offset from the config file with this value. --mesh: Refine the sci cam so that the sci cam texture agrees with the nav cam texture when projected on this mesh. diff --git a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc index 23e74a3e..5bdfca11 100644 --- a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc @@ -347,6 +347,13 @@ bool lookupCloud(double desired_time, std::vector const return false; } +// A wrapper around a function in pcl_ros/point_cloud.h to avoid +// including that header all over the place as it creates an annoying +// warning. 
+void msgToPcl(sensor_msgs::PointCloud2::ConstPtr pc_msg, pcl::PointCloud& pc) { + pcl::fromROSMsg(*pc_msg, pc); +} + // Read the list of topics in a bag while avoiding repetitions void readTopicsInBag(std::string const& bag_file, std::vector& topics) { if (!boost::filesystem::exists(bag_file)) LOG(FATAL) << "Bag does not exist: " << bag_file; diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index 29ff99d4..f4e509ee 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -183,67 +183,83 @@ std::string matType(cv::Mat const& mat) { // Read the transform from depth to given camera void readCameraTransform(config_reader::ConfigReader& config, std::string const transform_str, - Eigen::MatrixXd& transform) { + Eigen::Affine3d& transform) { Eigen::Vector3d T; Eigen::Quaterniond R; if (!msg_conversions::config_read_transform(&config, transform_str.c_str(), &T, &R)) - LOG(FATAL) << "Unspecified transform: " << transform_str; + LOG(FATAL) << "Unspecified transform: " << transform_str << " for robot: " + << getenv("ASTROBEE_ROBOT") << "\n"; R.normalize(); - Eigen::Affine3d affine_trans = Eigen::Affine3d(Eigen::Translation3d(T.x(), T.y(), T.z())) * Eigen::Affine3d(R); - transform = affine_trans.matrix(); -} - -// Read a bunch of transforms from the robot calibration file -void readConfigFile(std::string const& navcam_to_hazcam_timestamp_offset_str, - std::string const& scicam_to_hazcam_timestamp_offset_str, - std::string const& hazcam_to_navcam_transform_str, - std::string const& scicam_to_hazcam_transform_str, std::string const& navcam_to_body_transform_str, - std::string const& hazcam_depth_to_image_transform_str, double& navcam_to_hazcam_timestamp_offset, - double& scicam_to_hazcam_timestamp_offset, Eigen::MatrixXd& hazcam_to_navcam_trans, - Eigen::MatrixXd& scicam_to_hazcam_trans, Eigen::MatrixXd& navcam_to_body_trans, - Eigen::Affine3d& 
hazcam_depth_to_image_transform, camera::CameraParameters& nav_cam_params, - camera::CameraParameters& haz_cam_params, camera::CameraParameters& sci_cam_params) { + transform = Eigen::Affine3d(Eigen::Translation3d(T.x(), T.y(), T.z())) * Eigen::Affine3d(R); +} + +// Read some transforms from the robot calibration file +void readConfigFile // NOLINT +(// Inputs // NOLINT + std::vector const& cam_names, // NOLINT + std::string const& nav_cam_to_body_trans_str, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + // Outputs // NOLINT + std::vector & cam_params, // NOLINT + std::vector & nav_to_cam_trans, // NOLINT + std::vector & nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d & nav_cam_to_body_trans, // NOLINT + Eigen::Affine3d & haz_cam_depth_to_image_trans){ // NOLINT config_reader::ConfigReader config; config.AddFile("cameras.config"); config.AddFile("transforms.config"); if (!config.ReadFiles()) LOG(FATAL) << "Failed to read config files."; - readCameraTransform(config, hazcam_to_navcam_transform_str, hazcam_to_navcam_trans); - readCameraTransform(config, scicam_to_hazcam_transform_str, scicam_to_hazcam_trans); - readCameraTransform(config, navcam_to_body_transform_str, navcam_to_body_trans); - - if (!config.GetReal(navcam_to_hazcam_timestamp_offset_str.c_str(), &navcam_to_hazcam_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << navcam_to_hazcam_timestamp_offset_str - << " for robot: " << getenv("ASTROBEE_ROBOT"); + cam_params.clear(); + nav_to_cam_trans.clear(); + nav_to_cam_timestamp_offset.clear(); + for (size_t it = 0; it < cam_names.size(); it++) { + camera::CameraParameters params = camera::CameraParameters(&config, cam_names[it].c_str()); + cam_params.push_back(params); + + std::string trans_str = "nav_cam_to_" + cam_names[it] + "_transform"; + if (cam_names[it] == "nav_cam") { + // transforms from nav cam to itself + nav_to_cam_trans.push_back(Eigen::Affine3d::Identity()); + 
nav_to_cam_timestamp_offset.push_back(0.0); + } else { + Eigen::Affine3d trans; + readCameraTransform(config, trans_str, trans); + nav_to_cam_trans.push_back(trans); + + std::string offset_str = "nav_cam_to_" + cam_names[it] + "_timestamp_offset"; + double offset; + if (!config.GetReal(offset_str.c_str(), &offset)) + LOG(FATAL) << "Could not read value of " << offset_str + << " for robot: " << getenv("ASTROBEE_ROBOT"); + nav_to_cam_timestamp_offset.push_back(offset); + } + } - if (!config.GetReal(scicam_to_hazcam_timestamp_offset_str.c_str(), &scicam_to_hazcam_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << scicam_to_hazcam_timestamp_offset_str - << " for robot: " << getenv("ASTROBEE_ROBOT"); + // Read the remaining data + readCameraTransform(config, nav_cam_to_body_trans_str, nav_cam_to_body_trans); Eigen::MatrixXd M(4, 4); - config_reader::ConfigReader::Table mat(&config, hazcam_depth_to_image_transform_str.c_str()); + config_reader::ConfigReader::Table mat(&config, haz_cam_depth_to_image_trans_str.c_str()); int count = 0; for (int row = 0; row < M.rows(); row++) { for (int col = 0; col < M.cols(); col++) { count++; // note that the count stats from 1 if (!mat.GetReal(count, &M(row, col))) { - LOG(FATAL) << "Could not read value of " << hazcam_depth_to_image_transform_str + LOG(FATAL) << "Could not read value of " << haz_cam_depth_to_image_trans_str << " for robot: " << getenv("ASTROBEE_ROBOT"); } } } - hazcam_depth_to_image_transform.matrix() = M; - - nav_cam_params = camera::CameraParameters(&config, "nav_cam"); - haz_cam_params = camera::CameraParameters(&config, "haz_cam"); - sci_cam_params = camera::CameraParameters(&config, "sci_cam"); + haz_cam_depth_to_image_trans.matrix() = M; } // Given two poses aff0 and aff1, and 0 <= alpha <= 1, do linear interpolation. 
-Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::Affine3d const& aff1) { +Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, + Eigen::Affine3d const& aff1) { Eigen::Quaternion rot0(aff0.linear()); Eigen::Quaternion rot1(aff1.linear()); @@ -261,7 +277,8 @@ Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::A // Given a set of poses indexed by timestamp in an std::map, find the // interpolated pose at desired timestamp. This is efficient // only for very small maps. Else use the StampedPoseStorage class. -bool findInterpPose(double desired_time, std::map const& poses, Eigen::Affine3d& interp_pose) { +bool findInterpPose(double desired_time, std::map const& poses, + Eigen::Affine3d& interp_pose) { double left_time = std::numeric_limits::max(); double right_time = -left_time; for (auto it = poses.begin(); it != poses.end(); it++) { @@ -509,6 +526,11 @@ int robust_find(std::string const& text, std::string const& val, int beg) { beg = text.find(val, beg); if (beg == std::string::npos) return beg; + // TODO(oalexan1): Must skip comments. From this position must + // read back towards the beginning of the current line and see if + // the text "--" is encountered, which would mean that this + // position is on a comment and hence the search must continue. + beg += val.size(); // advance // Look for spaces and the equal sign @@ -536,7 +558,8 @@ int robust_find(std::string const& text, std::string const& val, int beg) { // Replace a given parameter's value in the text. This is very fragile // code, particularly it does not understand that text starting with // "--" is a comment. 
-void replace_param_val(std::string const& param_name, std::string const& param_val, std::string const& parent, +void replace_param_val(std::string const& param_name, std::string const& param_val, + std::string const& parent, std::string const& beg_brace, std::string const& end_brace, // The text to modify std::string& text) { @@ -589,19 +612,25 @@ void replace_param_val(std::string const& param_name, std::string const& param_v text = text.replace(beg, end - beg + 1, param_val); } -// Modify in-place the robot config file -void update_config_file(bool update_cam1, std::string const& cam1_name, - boost::shared_ptr cam1_params, bool update_cam2, - std::string const& cam2_name, boost::shared_ptr cam2_params, - bool update_depth_to_image_transform, Eigen::Affine3d const& depth_to_image_transform, - bool update_extrinsics, Eigen::Affine3d const& cam2_to_cam1_transform, - bool update_timestamp_offset, std::string const& cam1_to_cam2_timestamp_offset_str, - double cam1_to_cam2_timestamp_offset) { +// Save some transforms from the robot calibration file. This has some very fragile +// logic and cannot handle comments in the config file. 
+void updateConfigFile // NOLINT +(std::vector const& cam_names, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + std::vector const& cam_params, // NOLINT + std::vector const& nav_to_cam_trans, // NOLINT + std::vector const& nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d const& haz_cam_depth_to_image_trans) { // NOLINT + if (cam_names.size() != cam_params.size() || + cam_names.size() != nav_to_cam_trans.size() || + cam_names.size() != nav_to_cam_timestamp_offset.size()) + LOG(FATAL) << "The number of various inputs to updateConfigFile() do not match."; + // Open the config file to modify char* config_dir = getenv("ASTROBEE_CONFIG_DIR"); - if (config_dir == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_CONFIG_DIR was not set"; + if (config_dir == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_CONFIG_DIR was not set."; char* robot = getenv("ASTROBEE_ROBOT"); - if (robot == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_ROBOT was not set"; + if (robot == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_ROBOT was not set."; std::string config_file = std::string(config_dir) + "/robots/" + robot + ".config"; std::ifstream ifs(config_file.c_str()); if (!ifs.is_open()) LOG(FATAL) << "Could not open file: " << config_file; @@ -612,108 +641,52 @@ void update_config_file(bool update_cam1, std::string const& cam1_name, while (std::getline(ifs, line)) text += line + "\n"; ifs.close(); - std::cout << "Updating " << config_file << std::endl; - - // Handle cam1 - if (update_cam1) { - std::string cam1_intrinsics = intrinsics_to_string(cam1_params->GetFocalVector(), cam1_params->GetOpticalOffset()); - replace_param_val("intrinsic_matrix", cam1_intrinsics, cam1_name, "{", "}", text); - Eigen::VectorXd cam1_distortion = cam1_params->GetDistortion(); - if (cam1_distortion.size() > 1) { - std::string distortion_str = vector_to_string(cam1_distortion); - replace_param_val("distortion_coeff", distortion_str, 
cam1_name, "{", "}", text); - } else if (cam1_distortion.size() == 1) { - std::string distortion_str = " " + number_to_string(cam1_distortion[0]); - replace_param_val("distortion_coeff", distortion_str, cam1_name, "", "", text); + std::cout << "Updating: " << config_file << std::endl; + + for (size_t it = 0; it < cam_names.size(); it++) { + std::string const& cam_name = cam_names[it]; // alias + camera::CameraParameters const& params = cam_params[it]; // alias + + std::string intrinsics = intrinsics_to_string(params.GetFocalVector(), + params.GetOpticalOffset()); + replace_param_val("intrinsic_matrix", intrinsics, cam_name, "{", "}", text); + Eigen::VectorXd cam_distortion = params.GetDistortion(); + if (cam_distortion.size() > 1) { + std::string distortion_str = vector_to_string(cam_distortion); + replace_param_val("distortion_coeff", distortion_str, cam_name, "{", "}", text); + } else if (cam_distortion.size() == 1) { + std::string distortion_str = " " + number_to_string(cam_distortion[0]); + replace_param_val("distortion_coeff", distortion_str, cam_name, "", "", text); } else { - LOG(FATAL) << "Camera " << cam1_name << " must have distortion."; + LOG(FATAL) << "Camera " << cam_name << " must have distortion."; } - } - - // Handle cam2 - if (update_cam2) { - std::string cam2_intrinsics = intrinsics_to_string(cam2_params->GetFocalVector(), cam2_params->GetOpticalOffset()); - replace_param_val("intrinsic_matrix", cam2_intrinsics, cam2_name, "{", "}", text); - Eigen::VectorXd cam2_distortion = cam2_params->GetDistortion(); - if (cam2_distortion.size() > 1) { - std::string distortion_str = vector_to_string(cam2_distortion); - replace_param_val("distortion_coeff", distortion_str, cam2_name, "{", "}", text); - } else if (cam2_distortion.size() == 1) { - std::string distortion_str = " " + number_to_string(cam2_distortion[0]); - replace_param_val("distortion_coeff", distortion_str, cam2_name, "", "", text); - } else { - LOG(FATAL) << "Camera " << cam2_name << " must have 
distortion."; - } - } - std::string short_cam1 = ff_common::ReplaceInStr(cam1_name, "_", ""); - std::string short_cam2 = ff_common::ReplaceInStr(cam2_name, "_", ""); + if (cam_names[it] == "nav_cam") continue; // this will have the trivial transforms - if (update_depth_to_image_transform) { - Eigen::MatrixXd depth_to_image_transform_mat = depth_to_image_transform.matrix(); - std::string depth_to_image_transform_param = short_cam2 + "_depth_to_image_transform"; - std::string depth_to_image_transform_val = matrix_to_string(depth_to_image_transform_mat); - replace_param_val(depth_to_image_transform_param, depth_to_image_transform_val, "", "{", "}", text); - } - - if (update_extrinsics) { - if (cam1_name == "nav_cam") { - std::string extrinsics_name = short_cam2 + "_to_" + short_cam1 + "_transform"; - std::string extrinsics_str = affine_to_string(cam2_to_cam1_transform); - replace_param_val(extrinsics_name, extrinsics_str, "", "(", ")", text); - } else if (cam1_name == "sci_cam") { - std::string extrinsics_name = short_cam1 + "_to_" + short_cam2 + "_transform"; - std::string extrinsics_str = affine_to_string(cam2_to_cam1_transform.inverse()); - replace_param_val(extrinsics_name, extrinsics_str, "", "(", ")", text); - } else { - LOG(FATAL) << "Unexpected value for cam1: " << cam1_name; - } + std::string trans_name = "nav_cam_to_" + cam_names[it] + "_transform"; + std::string trans_val = affine_to_string(nav_to_cam_trans[it]); + replace_param_val(trans_name, trans_val, "", "(", ")", text); - if (update_timestamp_offset) - replace_param_val(cam1_to_cam2_timestamp_offset_str, " " + number_to_string(cam1_to_cam2_timestamp_offset), "", + std::string offset_str = "nav_cam_to_" + cam_names[it] + "_timestamp_offset"; + replace_param_val(offset_str, " " + number_to_string(nav_to_cam_timestamp_offset[it]), "", "", "", text); } + std::string depth_to_image_transform_val + = matrix_to_string(haz_cam_depth_to_image_trans.matrix()); + 
replace_param_val(haz_cam_depth_to_image_trans_str, depth_to_image_transform_val, + "", "{", "}", text); + // Write the updated values std::ofstream ofs(config_file.c_str()); ofs << text; ofs.close(); } -// Some small utilities for writing a file having poses for nav, sci, and haz cam, -// and also the depth timestamp corresponding to given haz intensity timestamp -void writeCameraPoses(std::string const& filename, std::map const& haz_depth_to_image_timestamps, - std::map > const& world_to_cam_poses) { - std::ofstream ofs(filename.c_str()); - ofs.precision(17); - - for (auto it = world_to_cam_poses.begin(); it != world_to_cam_poses.end(); it++) { - std::string cam_name = it->first; - auto& cam_poses = it->second; - - for (auto it2 = cam_poses.begin(); it2 != cam_poses.end(); it2++) { - double timestamp = it2->first; - Eigen::MatrixXd M = it2->second.matrix(); - - ofs << cam_name << ' ' << timestamp << " "; - for (int row = 0; row < 4; row++) { - for (int col = 0; col < 4; col++) { - ofs << M(row, col) << " "; - } - } - ofs << "\n"; - } - } - - for (auto it = haz_depth_to_image_timestamps.begin(); it != haz_depth_to_image_timestamps.end(); it++) { - ofs << "haz_depth_to_image " << it->first << ' ' << it->second << "\n"; - } - - ofs.close(); -} - -void readCameraPoses(std::string const& filename, std::map& haz_depth_to_image_timestamps, - std::map >& world_to_cam_poses) { +void readCameraPoses(std::string const& filename, + std::map& haz_depth_to_image_timestamps, + std::map >& + world_to_cam_poses) { haz_depth_to_image_timestamps.clear(); world_to_cam_poses.clear(); diff --git a/dense_map/geometry_mapper/src/texture_processing.cc b/dense_map/geometry_mapper/src/texture_processing.cc index 1dfebc23..7f0ea9e8 100644 --- a/dense_map/geometry_mapper/src/texture_processing.cc +++ b/dense_map/geometry_mapper/src/texture_processing.cc @@ -655,7 +655,7 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre omp_set_num_threads(num_threads); // 
Use this many threads for all // consecutive parallel regions - std::cout << "Forming the textured model" << std::endl; + std::cout << "Forming the textured model, please wait ..." << std::endl; util::WallTimer timer; std::vector const& vertices = mesh->get_vertices(); diff --git a/dense_map/geometry_mapper/tools/camera_calibrator.cc b/dense_map/geometry_mapper/tools/camera_calibrator.cc index aa171bdc..320f6fe3 100644 --- a/dense_map/geometry_mapper/tools/camera_calibrator.cc +++ b/dense_map/geometry_mapper/tools/camera_calibrator.cc @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -115,11 +114,11 @@ DEFINE_int32(num_cam1_focal_lengths, 1, DEFINE_int32(num_cam2_focal_lengths, 1, "If set to 2, use separate focal lengths along image rows and columns for cam2. Else use the same one."); -DEFINE_int32(num_cam1_iterations, 10000, "How many solver iterations to perform to solve for cam1 intrinsics."); +DEFINE_int32(num_cam1_iterations, 1000, "How many solver iterations to perform to solve for cam1 intrinsics."); -DEFINE_int32(num_cam2_iterations, 10000, "How many solver iterations to perform to solve for cam2 intrinsics."); +DEFINE_int32(num_cam2_iterations, 1000, "How many solver iterations to perform to solve for cam2 intrinsics."); -DEFINE_int32(num_extrinsics_iterations, 10000, "How many solver iterations to perform to solve for extrinsics."); +DEFINE_int32(num_extrinsics_iterations, 1000, "How many solver iterations to perform to solve for extrinsics."); DEFINE_double(robust_threshold, 4.0, "Pixel errors much larger than this will be exponentially attenuated " @@ -487,6 +486,11 @@ void read_depth_cam_meas( // Inputs cam2_timestamps.push_back(it->first); } + if (cam2_timestamps.empty()) { + std::cout << "Could not read any target corners." 
<< std::endl; + return; + } + // Iterate over the depth cam clouds rosbag::Bag bag; bag.open(bag_file, rosbag::bagmode::Read); @@ -551,13 +555,15 @@ void read_depth_cam_meas( // Inputs } pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); - if (static_cast(pc.points.size()) != static_cast(pc_msg->width * pc_msg->height)) + dense_map::msgToPcl(pc_msg, pc); + + if (static_cast(pc.points.size()) != pc_msg->width * pc_msg->height) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; double best_image_timestamp = cam2_timestamps[image_cid]; auto it = cam2_target_corners.find(best_image_timestamp); - if (it == cam2_target_corners.end()) LOG(FATAL) << "Cannot locate target corners at desired timestamp."; + if (it == cam2_target_corners.end()) + LOG(FATAL) << "Cannot locate target corners at desired timestamp."; // Will push here one depth measurement for each target corners. // Invalid ones will be (0, 0, 0). @@ -601,7 +607,8 @@ void read_depth_cam_meas( // Inputs // Create the vector timestamp offset samples. If none are specified // on the command line, use only the one specified in the config file. -void parse_timestamp_offset_sampling(double cam1_to_cam2_timestamp_offset, std::string const& timestamp_offset_sampling, +void parse_timestamp_offset_sampling(double cam1_to_cam2_timestamp_offset, + std::string const& timestamp_offset_sampling, std::vector& samples) { std::istringstream is(timestamp_offset_sampling); @@ -683,7 +690,10 @@ double calc_and_print_residual_stats(std::string const& prefix, std::string cons std::vector err_norm; size_t len = residuals.size() / 2; - if (len == 0) LOG(FATAL) << "No residuals exist."; + if (len == 0) { + std::cout << "No residuals exist. 
This is likely an error.\n"; + return 0; + } double res = 0.0; for (size_t it = 0; it < len; it++) { @@ -694,15 +704,18 @@ double calc_and_print_residual_stats(std::string const& prefix, std::string cons res /= len; std::sort(err_norm.begin(), err_norm.end()); - std::cout << prefix << " min, mean, median and max reprojection error for " << cam_name << ": " << err_norm[0] << ' ' + std::cout << prefix << " min, mean, median and max reprojection error for " + << cam_name << ": " << err_norm[0] << ' ' << res << ' ' << err_norm[err_norm.size() / 2] << ' ' << err_norm.back() << std::endl; // Return the median error return err_norm[err_norm.size() / 2]; } -void save_residuals(std::string const& prefix, std::string const& cam_name, std::string const& output_dir, - std::vector const& pixels, std::vector const& residuals) { +void save_residuals(std::string const& prefix, std::string const& cam_name, + std::string const& output_dir, + std::vector const& pixels, + std::vector const& residuals) { size_t len = residuals.size() / 2; if (len != pixels.size()) { LOG(FATAL) << "Must have one residual norm per pixel."; @@ -715,8 +728,8 @@ void save_residuals(std::string const& prefix, std::string const& cam_name, std: ofs << "# pixel_x, pixel_y, residual_x, residual_y, residual_norm\n"; for (size_t it = 0; it < len; it++) { double norm = Eigen::Vector2d(residuals[2 * it + 0], residuals[2 * it + 1]).norm(); - ofs << pixels[it][0] << ", " << pixels[it][1] << ", " << residuals[2 * it + 0] << ", " << residuals[2 * it + 1] - << ", " << norm << std::endl; + ofs << pixels[it][0] << ", " << pixels[it][1] << ", " << residuals[2 * it + 0] + << ", " << residuals[2 * it + 1] << ", " << norm << std::endl; } ofs.close(); } @@ -769,37 +782,42 @@ void refine_intrinsics( // Inputs ceres::LossFunction* loss_function = GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(cost_function, loss_function, &focal_vector[0], &optical_center[0], &distortion[0], + 
problem.AddResidualBlock(cost_function, loss_function, &focal_vector[0], + &optical_center[0], &distortion[0], &cameras[NUM_RIGID_PARAMS * cam_it]); // In case it is ever desired to experiment with fixing the cameras // problem.SetParameterBlockConstant(&cameras[NUM_RIGID_PARAMS * cam_it]); - } - } - // See which values to float or keep fixed - if (cam_intrinsics_to_float.find("focal_length") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating focal_length." << - // std::endl; - problem.SetParameterBlockConstant(&focal_vector[0]); - } else { - // std::cout << "For " << cam_name << " floating focal_length." << - // std::endl; - } - if (cam_intrinsics_to_float.find("optical_center") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating optical_center." << - // std::endl; - problem.SetParameterBlockConstant(&optical_center[0]); - } else { - // std::cout << "For " << cam_name << " floating optical_center." << - // std::endl; - } - if (cam_intrinsics_to_float.find("distortion") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating distortion." << - // std::endl; - problem.SetParameterBlockConstant(&distortion[0]); - } else { - // std::cout << "For " << cam_name << " floating distortion." << std::endl; + // This logic must be in the loop, right after the residual got + // added, otherwise one risks fixing parameters which were not + // created yet. + + // See which values to float or keep fixed + if (cam_intrinsics_to_float.find("focal_length") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating focal_length." << + // std::endl; + problem.SetParameterBlockConstant(&focal_vector[0]); + } else { + // std::cout << "For " << cam_name << " floating focal_length." 
<< + // std::endl; + } + if (cam_intrinsics_to_float.find("optical_center") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating optical_center." << + // std::endl; + problem.SetParameterBlockConstant(&optical_center[0]); + } else { + // std::cout << "For " << cam_name << " floating optical_center." << + // std::endl; + } + if (cam_intrinsics_to_float.find("distortion") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating distortion." << + // std::endl; + problem.SetParameterBlockConstant(&distortion[0]); + } else { + // std::cout << "For " << cam_name << " floating distortion." << std::endl; + } + } } // Evaluate the residual before optimization @@ -1170,8 +1188,10 @@ void find_initial_extrinsics(std::vector const& cam1_timestamps, std::ve // Refine cam2_to_cam1_transform void refine_extrinsics(int num_iterations, std::vector const& cam1_timestamps, std::vector const& cam2_timestamps, - std::vector const& world_to_cam1_vec, double cam1_to_cam2_timestamp_offset, - double max_interp_dist, boost::shared_ptr const& cam2_params, + std::vector const& world_to_cam1_vec, + double cam1_to_cam2_timestamp_offset, + double max_interp_dist, + boost::shared_ptr const& cam2_params, std::map>> const& cam2_target_corners, Eigen::Affine3d& cam2_to_cam1_transform, double& median_error) { // Convert the initial transform to vector format, to pass to the solver @@ -1224,7 +1244,8 @@ void refine_extrinsics(int num_iterations, std::vector const& cam1_times double alpha = (cam2_time - left_cam1_time) / (right_cam1_time - left_cam1_time); - Eigen::Affine3d interp_world_to_cam1 = dense_map::linearInterp(alpha, left_world_to_cam1, right_world_to_cam1); + Eigen::Affine3d interp_world_to_cam1 + = dense_map::linearInterp(alpha, left_world_to_cam1, right_world_to_cam1); auto it = cam2_target_corners.find(cam2_time); if (it == cam2_target_corners.end()) continue; @@ -1238,8 +1259,9 @@ void refine_extrinsics(int 
num_iterations, std::vector const& cam1_times target_corner_pixel[0] = target_corners[pt_iter][3]; // projection into camera x target_corner_pixel[1] = target_corners[pt_iter][4]; // projection into camera y - ceres::CostFunction* cost_function = ExtrinsicsError::Create(target_corner_pixel, target_corner_meas, - interp_world_to_cam1, block_sizes, cam2_params); + ceres::CostFunction* cost_function + = ExtrinsicsError::Create(target_corner_pixel, target_corner_meas, + interp_world_to_cam1, block_sizes, cam2_params); problem.AddResidualBlock(cost_function, loss_function, &cam2_to_cam1_transform_vec[0]); } } @@ -1279,9 +1301,11 @@ void refine_extrinsics(int num_iterations, std::vector const& cam1_times int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); GOOGLE_PROTOBUF_VERIFY_VERSION; - if (FLAGS_cam1_dir.empty() || FLAGS_cam2_dir.empty()) LOG(FATAL) << "Not all inputs were specified."; + if (FLAGS_cam1_dir.empty() || FLAGS_cam2_dir.empty()) + LOG(FATAL) << "Not all inputs were specified."; - if ((FLAGS_cam1_name != "nav_cam" && FLAGS_cam1_name != "sci_cam") || FLAGS_cam2_name != "haz_cam") + if ((FLAGS_cam1_name != "nav_cam" && FLAGS_cam1_name != "sci_cam") || + FLAGS_cam2_name != "haz_cam") LOG(FATAL) << "Must specify -cam1_name as one of nav_cam or sci_cam," << " and -cam2_name as haz_cam."; @@ -1309,30 +1333,52 @@ int main(int argc, char** argv) { dense_map::parse_intrinsics_to_float(FLAGS_cam1_intrinsics_to_float, cam1_intrinsics_to_float); dense_map::parse_intrinsics_to_float(FLAGS_cam2_intrinsics_to_float, cam2_intrinsics_to_float); - config_reader::ConfigReader config; - config.AddFile("cameras.config"); - config.AddFile("transforms.config"); - if (!config.ReadFiles()) LOG(FATAL) << "Failed to read config files."; - // read cam1 params - boost::shared_ptr cam1_params = - boost::shared_ptr(new camera::CameraParameters(&config, FLAGS_cam1_name.c_str())); - // read cam2 params - boost::shared_ptr cam2_params = - 
boost::shared_ptr(new camera::CameraParameters(&config, FLAGS_cam2_name.c_str())); - - std::string cam1_to_cam2_timestamp_offset_str; + // Read the bot config file + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d navcam_to_body_trans; + Eigen::Affine3d hazcam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans, + hazcam_depth_to_image_transform); + + Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + Eigen::Affine3d scicam_to_hazcam_aff_trans = + ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse(); + double navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + double scicam_to_hazcam_timestamp_offset = + ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2]; + + // Set cam1 and cam2 params + boost::shared_ptr cam1_params; + boost::shared_ptr cam2_params; double cam1_to_cam2_timestamp_offset = 0.0; + std::string cam1_to_cam2_timestamp_offset_str; + Eigen::Affine3d cam2_to_cam1_transform; if (FLAGS_cam1_name == "nav_cam" && FLAGS_cam2_name == "haz_cam") { + cam1_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[0])); + cam2_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[1])); + cam1_to_cam2_timestamp_offset = navcam_to_hazcam_timestamp_offset; cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; + cam2_to_cam1_transform = hazcam_to_navcam_aff_trans; } else if (FLAGS_cam1_name == "sci_cam" && FLAGS_cam2_name == "haz_cam") { + cam1_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[2])); + cam2_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[1])); + cam1_to_cam2_timestamp_offset = 
scicam_to_hazcam_timestamp_offset; cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; + cam2_to_cam1_transform = scicam_to_hazcam_aff_trans.inverse(); } else { LOG(FATAL) << "Must specify -cam1_name as nav_cam or sci_cam, " << "and -cam2_name as haz_cam."; } - if (!config.GetReal(cam1_to_cam2_timestamp_offset_str.c_str(), &cam1_to_cam2_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << cam1_to_cam2_timestamp_offset - << " for robot: " << getenv("ASTROBEE_ROBOT"); if (FLAGS_num_cam1_focal_lengths != 1 && FLAGS_num_cam1_focal_lengths != 2) LOG(FATAL) << "Can use 1 or 2 focal lengths."; @@ -1350,7 +1396,8 @@ int main(int argc, char** argv) { } std::vector timestamp_offset_samples; - dense_map::parse_timestamp_offset_sampling(cam1_to_cam2_timestamp_offset, FLAGS_timestamp_offset_sampling, + dense_map::parse_timestamp_offset_sampling(cam1_to_cam2_timestamp_offset, + FLAGS_timestamp_offset_sampling, timestamp_offset_samples); // Find cam1 poses and timestamps @@ -1366,12 +1413,10 @@ int main(int argc, char** argv) { // Refine cam1_params and world_to_cam1_vec if (FLAGS_update_cam1) dense_map::refine_intrinsics( // Inputs - FLAGS_cam1_name, FLAGS_num_cam1_focal_lengths, - FLAGS_num_cam1_iterations, cam1_intrinsics_to_float, - FLAGS_output_dir, - // Outputs - cam1_params, cam1_observations, cam1_landmarks, world_to_cam1_vec); - + FLAGS_cam1_name, FLAGS_num_cam1_focal_lengths, FLAGS_num_cam1_iterations, + cam1_intrinsics_to_float, FLAGS_output_dir, + // Outputs + cam1_params, cam1_observations, cam1_landmarks, world_to_cam1_vec); // Refine cam2_params. It is very important to note that here we // want the 3D points as measured by the depth camera to project // accurately to the target corner pixels, unlike for the intrinsics @@ -1380,40 +1425,38 @@ int main(int argc, char** argv) { // depth camera while cam1 is not. 
if (FLAGS_update_cam2) dense_map::refine_depth_cam_intrinsics( // Inputs - FLAGS_cam2_name, FLAGS_num_cam2_focal_lengths, - FLAGS_num_cam2_iterations, cam2_intrinsics_to_float, - cam2_target_corners, target_corners_depth, - // Outputs - cam2_params); + FLAGS_cam2_name, FLAGS_num_cam2_focal_lengths, FLAGS_num_cam2_iterations, + cam2_intrinsics_to_float, cam2_target_corners, target_corners_depth, + // Outputs + cam2_params); // Find cam2 poses and timestamps std::vector cam2_timestamps; std::vector> cam2_observations; std::vector> cam2_landmarks; std::vector world_to_cam2_vec; - dense_map::find_world_to_cam_trans(FLAGS_cam2_name, cam2_params, cam2_target_corners, cam2_timestamps, - cam2_observations, cam2_landmarks, world_to_cam2_vec); + dense_map::find_world_to_cam_trans(FLAGS_cam2_name, cam2_params, cam2_target_corners, + cam2_timestamps, cam2_observations, cam2_landmarks, + world_to_cam2_vec); // Find the transform to correct the scale of the haz cam depth clouds - Eigen::Affine3d depth_to_image_transform; if (FLAGS_update_depth_to_image_transform) dense_map::find_depth_to_image_transform( // Inputs - cam2_timestamps, world_to_cam2_vec, cam2_target_corners, - target_corners_depth, FLAGS_cam2_name, cam2_params, + cam2_timestamps, world_to_cam2_vec, cam2_target_corners, target_corners_depth, + FLAGS_cam2_name, cam2_params, // Outputs - depth_to_image_transform); + hazcam_depth_to_image_transform); // Find the transform from cam2 to cam1. - Eigen::Affine3d cam2_to_cam1_transform; if (FLAGS_update_extrinsics) { // If provided, try several values // for cam1_to_cam2_timestamp_offset and pick the one with the smallest // median error. if (FLAGS_timestamp_offset_sampling != "") { std::cout << std::endl; - std::cout << "Sampling the timestamp offset from " << timestamp_offset_samples.front() << " to " - << timestamp_offset_samples.back() << " with " << timestamp_offset_samples.size() << " sample(s)." 
- << std::endl; + std::cout << "Sampling the timestamp offset from " << timestamp_offset_samples.front() + << " to " << timestamp_offset_samples.back() << " with " + << timestamp_offset_samples.size() << " sample(s)." << std::endl; } double median_extrinsics_err = std::numeric_limits::max(); @@ -1426,12 +1469,17 @@ int main(int argc, char** argv) { std::cout << "Finding extrinsics with " << cam1_to_cam2_timestamp_offset_str << " " << curr_cam1_to_cam2_timestamp_offset << std::endl; - dense_map::find_initial_extrinsics(cam1_timestamps, cam2_timestamps, world_to_cam1_vec, world_to_cam2_vec, - curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, + dense_map::find_initial_extrinsics(cam1_timestamps, cam2_timestamps, + world_to_cam1_vec, world_to_cam2_vec, + curr_cam1_to_cam2_timestamp_offset, + FLAGS_max_interp_dist, curr_cam2_to_cam1_transform); - dense_map::refine_extrinsics(FLAGS_num_extrinsics_iterations, cam1_timestamps, cam2_timestamps, world_to_cam1_vec, - curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, cam2_params, - cam2_target_corners, curr_cam2_to_cam1_transform, curr_median_extrinsics_err); + dense_map::refine_extrinsics(FLAGS_num_extrinsics_iterations, cam1_timestamps, + cam2_timestamps, world_to_cam1_vec, + curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, + cam2_params, + cam2_target_corners, curr_cam2_to_cam1_transform, + curr_median_extrinsics_err); if (curr_median_extrinsics_err < median_extrinsics_err) { cam1_to_cam2_timestamp_offset = curr_cam1_to_cam2_timestamp_offset; cam2_to_cam1_transform = curr_cam2_to_cam1_transform; @@ -1440,21 +1488,46 @@ int main(int argc, char** argv) { } std::cout << std::endl; - std::cout << "Final value of " << cam1_to_cam2_timestamp_offset_str << " is " << cam1_to_cam2_timestamp_offset - << " with smallest median error of " << median_extrinsics_err << " pixels" << std::endl; + std::cout << "Final value of " << cam1_to_cam2_timestamp_offset_str << " is " + << cam1_to_cam2_timestamp_offset + << 
" with smallest median error of " << median_extrinsics_err + << " pixels" << std::endl; std::cout << std::endl; - std::cout << "Extrinsics transform from " << FLAGS_cam2_name << " to " << FLAGS_cam1_name << ":\n" + std::cout << "Extrinsics transform from " << FLAGS_cam2_name << " to " + << FLAGS_cam1_name << ":\n" << cam2_to_cam1_transform.matrix() << std::endl; } - bool update_timestamp_offset = (FLAGS_timestamp_offset_sampling != ""); - dense_map::update_config_file(FLAGS_update_cam1, FLAGS_cam1_name, cam1_params, FLAGS_update_cam2, FLAGS_cam2_name, - cam2_params, FLAGS_update_depth_to_image_transform, depth_to_image_transform, - FLAGS_update_extrinsics, cam2_to_cam1_transform, update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, cam1_to_cam2_timestamp_offset); + // Save back the results + if (FLAGS_cam1_name == "nav_cam" && FLAGS_cam2_name == "haz_cam") { + cam_params[0] = *cam1_params; + cam_params[1] = *cam2_params; + navcam_to_hazcam_timestamp_offset = cam1_to_cam2_timestamp_offset; + hazcam_to_navcam_aff_trans = cam2_to_cam1_transform; + } else if (FLAGS_cam1_name == "sci_cam" && FLAGS_cam2_name == "haz_cam") { + cam_params[2] = *cam1_params; + cam_params[1] = *cam2_params; + scicam_to_hazcam_timestamp_offset = cam1_to_cam2_timestamp_offset; + scicam_to_hazcam_aff_trans = cam2_to_cam1_transform.inverse(); + } else { + LOG(FATAL) << "Must specify -cam1_name as nav_cam or sci_cam, " + << "and -cam2_name as haz_cam."; + } - if (FLAGS_output_dir == "") std::cout << "No output directory provided, hence no residuals got saved." 
<< std::endl; + // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + // nav_to_haz + ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse(); + // nav_to_sci + ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() * + hazcam_to_navcam_aff_trans.inverse(); + ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset; + ref_to_cam_timestamp_offsets[2] = + navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; + dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", + cam_params, ref_to_cam_trans, + ref_to_cam_timestamp_offsets, + hazcam_depth_to_image_transform); return 0; } diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 56dc9d9e..2c3e83e1 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -143,9 +143,9 @@ DEFINE_string(sci_cam_intrinsics_to_float, "", "optical_center, distortion. Specify as a quoted list. 
" "For example: 'focal_length optical_center'."); -DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); DEFINE_string(mesh, "", @@ -1679,7 +1679,6 @@ void calc_median_residuals(std::vector const& residuals, int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); GOOGLE_PROTOBUF_VERIFY_VERSION; - std::cout.precision(17); // to be able to print timestamps if (FLAGS_ros_bag.empty()) @@ -1713,39 +1712,40 @@ int main(int argc, char** argv) { if (haz_cam_intensity_handle.bag_msgs.empty()) LOG(FATAL) << "No haz cam images found."; // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT - "scicam_to_hazcam_timestamp_offset", // NOLINT - "hazcam_to_navcam_transform", // NOLINT - "scicam_to_hazcam_transform", // NOLINT - "nav_cam_transform", // NOLINT - 
"hazcam_depth_to_image_transform", // NOLINT - navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, - hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, - nav_cam_params, haz_cam_params, - sci_cam_params); - - if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d navcam_to_body_trans; + Eigen::Affine3d hazcam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans, + hazcam_depth_to_image_transform); + + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); it++) { + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; + } + } } + // Copy to structures expected by this tool + camera::CameraParameters nav_cam_params = cam_params[0]; + camera::CameraParameters haz_cam_params = cam_params[1]; + camera::CameraParameters sci_cam_params = cam_params[2]; + Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + Eigen::Affine3d scicam_to_hazcam_aff_trans = + ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse(); + double 
navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + double scicam_to_hazcam_timestamp_offset = + ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2]; + if (FLAGS_mesh_weight <= 0.0 || FLAGS_mesh_robust_threshold <= 0.0) LOG(FATAL) << "The mesh weight and robust threshold must be positive.\n"; @@ -1764,14 +1764,6 @@ int main(int argc, char** argv) { std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - // Convert hazcam_to_navcam_trans to Affine3d - Eigen::Affine3d hazcam_to_navcam_aff_trans; - hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; - - // Convert scicam_to_hazcam_trans to Affine3d - Eigen::Affine3d scicam_to_hazcam_aff_trans; - scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; - // Read the sparse map boost::shared_ptr sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); @@ -1812,7 +1804,8 @@ int main(int argc, char** argv) { std::vector const& sci_cam_msgs = sci_cam_handle.bag_msgs; std::vector all_sci_cam_timestamps; for (size_t sci_it = 0; sci_it < sci_cam_msgs.size(); sci_it++) { - sensor_msgs::Image::ConstPtr sci_image_msg = sci_cam_msgs[sci_it].instantiate(); + sensor_msgs::Image::ConstPtr sci_image_msg + = sci_cam_msgs[sci_it].instantiate(); sensor_msgs::CompressedImage::ConstPtr comp_sci_image_msg = sci_cam_msgs[sci_it].instantiate(); if (sci_image_msg) @@ -2241,49 +2234,22 @@ int main(int argc, char** argv) { sparse_map->Save(FLAGS_output_map); if (!FLAGS_opt_map_only) { - // update nav and haz - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "nav_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(nav_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool 
update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - hazcam_to_navcam_aff_trans, update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - navcam_to_hazcam_timestamp_offset); - } - if (!FLAGS_opt_map_only) { - // update sci and haz - // TODO(oalexan1): Write a single function to update all 3 of them - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "sci_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(sci_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - scicam_to_hazcam_aff_trans.inverse(), - update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - scicam_to_hazcam_timestamp_offset); + // Save back the results + // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + // nav_to_haz + ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse(); + // nav_to_sci + ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() * + hazcam_to_navcam_aff_trans.inverse(); + ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset; + ref_to_cam_timestamp_offsets[2] = + navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; + 
dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", + cam_params, ref_to_cam_trans, + ref_to_cam_timestamp_offsets, + hazcam_depth_to_image_transform); } + return 0; } // NOLINT diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc index a419be0d..b61012b3 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner2.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner2.cc @@ -165,9 +165,9 @@ DEFINE_string(sci_cam_intrinsics_to_float, "", "optical_center, distortion. Specify as a quoted list. " "For example: 'focal_length optical_center'."); -DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); DEFINE_double(depth_weight, 10.0, @@ -960,38 +960,60 @@ int main(int argc, char** argv) { if (FLAGS_timestamp_offsets_max_change < 0) LOG(FATAL) << "The timestamp offsets must be non-negative."; - // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters 
sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT - "scicam_to_hazcam_timestamp_offset", // NOLINT - "hazcam_to_navcam_transform", // NOLINT - "scicam_to_hazcam_transform", // NOLINT - "nav_cam_transform", // NOLINT - "hazcam_depth_to_image_transform", // NOLINT - navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, - hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, - nav_cam_params, haz_cam_params, - sci_cam_params); - - if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; + // We assume our camera rig has n camera types. Each can be image or + // depth + image. Just one camera must be the reference camera. In + // this code it will be nav_cam. Every camera object (class + // cameraImage) knows its type (an index), which can be used to look + // up its intrinsics, image topic, depth topic (if present), + // ref_to_cam_timestamp_offset, and ref_to_cam_transform. A camera + // object also stores its image, depth cloud (if present), its + // timestamp, and indices pointing to its left and right ref + // bracketing cameras. + + // For every instance of a reference camera its + // ref_to_cam_timestamp_offset is 0 and kept fixed, + // ref_to_cam_transform is the identity and kept fixed, and the + // indices pointing to the left and right ref bracketing cameras are + // identical. 
+ + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + + // Image and depth topics + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; + + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); it++) { + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; + } + } + } + + std::vector orig_cam_params = cam_params; + + for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { + std::cout << "Ref to cam offset for " << cam_names[it] << ' ' + << ref_to_cam_timestamp_offsets[it] << std::endl; } if (FLAGS_mesh_weight <= 0.0) @@ -1011,30 +1033,13 @@ int main(int argc, char** argv) { while (ifs >> val) sci_cam_timestamps_to_use.insert(val); } -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() - << "\n"; 
-#endif - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - - double hazcam_depth_to_image_scale - = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + double haz_cam_depth_to_image_scale + = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); // Since we will keep the scale fixed, vary the part of the transform without // the scale, while adding the scale each time before the transform is applied - Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; - hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; - - // Convert hazcam_to_navcam_trans to Affine3d - Eigen::Affine3d hazcam_to_navcam_aff_trans; - hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; - - // Convert scicam_to_hazcam_trans to Affine3d - Eigen::Affine3d scicam_to_hazcam_aff_trans; - scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; + Eigen::Affine3d haz_cam_depth_to_image_noscale = haz_cam_depth_to_image_transform; + haz_cam_depth_to_image_noscale.linear() /= haz_cam_depth_to_image_scale; // Read the sparse map boost::shared_ptr sparse_map = @@ -1065,65 +1070,17 @@ int main(int argc, char** argv) { dense_map::rigid_transform_to_array(world_to_ref_t[cid], &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - // We assume our camera rig has n camera types. Each can be image or - // depth + image. Just one camera must be the reference camera. In - // this code it will be nav_cam. Every camera object (class - // cameraImage) knows its type (an index), which can be used to look - // up its intrinsics, image topic, depth topic (if present), - // ref_to_cam_timestamp_offset, and ref_to_cam_transform. 
A camera - // object also stores its image, depth cloud (if present), its - // timestamp, and indices pointing to its left and right ref - // bracketing cameras. - - // For every instance of a reference camera its - // ref_to_cam_timestamp_offset is 0 and kept fixed, - // ref_to_cam_transform is the identity and kept fixed, and the - // indices pointing to the left and right ref bracketing cameras are - // identical. - - // The info below will eventually come from a file - int num_cam_types = 3; - int ref_cam_type = 0; // Below we assume the starting cam is the ref cam - - // Image and depth topics - std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", - "/hw/depth_haz/extended/amplitude_int", - "/hw/cam_sci/compressed"}; - std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; std::vector> cam_timestamps_to_use = {std::set(), std::set(), sci_cam_timestamps_to_use}; - // The timestamp offsets from ref cam to given cam - std::vector ref_to_cam_timestamp_offsets = - {0.0, - navcam_to_hazcam_timestamp_offset, - navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset}; - - for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { - std::cout << "Ref to cam offset for " << cam_names[it] << ' ' - << ref_to_cam_timestamp_offsets[it] << std::endl; - } - - std::vector cam_params = {nav_cam_params, - haz_cam_params, - sci_cam_params}; - // Which intrinsics from which cameras to float std::vector> intrinsics_to_float(num_cam_types); dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); - // The transform from ref to given cam - std::vector ref_to_cam_trans; - ref_to_cam_trans.push_back(Eigen::Affine3d::Identity()); - 
ref_to_cam_trans.push_back(hazcam_to_navcam_aff_trans.inverse()); - ref_to_cam_trans.push_back(scicam_to_hazcam_aff_trans.inverse() - * hazcam_to_navcam_aff_trans.inverse()); - // Put in arrays, so we can optimize them std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); for (int cam_type = 0; cam_type < num_cam_types; cam_type++) @@ -1138,9 +1095,9 @@ int main(int argc, char** argv) { // Depth to image transforms and scales std::vector depth_to_image_noscale; - std::vector depth_to_image_scales = {1.0, hazcam_depth_to_image_scale, 1.0}; + std::vector depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); - depth_to_image_noscale.push_back(hazcam_depth_to_image_noscale); + depth_to_image_noscale.push_back(haz_cam_depth_to_image_noscale); depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); // Put in arrays, so we can optimize them std::vector depth_to_image_noscale_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); @@ -1267,7 +1224,8 @@ int main(int argc, char** argv) { if (found_time >= right_timestamp) break; // out of range cv::Mat image; - if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], save_grayscale, + if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], + save_grayscale, // outputs image, start_pos, // care here @@ -1286,7 +1244,8 @@ int main(int argc, char** argv) { image.copyTo(best_image); } - // Go forward in time. We count on the fact that lookupImage() looks forward from given guess. + // Go forward in time. We count on the fact that + // lookupImage() looks forward from given guess. 
curr_timestamp = std::nextafter(found_time, 1.01 * found_time); } @@ -1622,7 +1581,8 @@ int main(int argc, char** argv) { // std::cout << "pid is " << pid << std::endl; // std::cout << "---aaa pid size is " << pid_to_cid_fid[pid].size() << std::endl; // std::cout << "zzz2 "; - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { int cid = cid_fid->first; int fid = cid_fid->second; // std::cout << cid << ' ' << keypoint_vec[cid][fid].first << ' '; @@ -1642,7 +1602,8 @@ int main(int argc, char** argv) { std::vector world_to_cam_vec; std::vector pix_vec; - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { int cid = cid_fid->first; int fid = cid_fid->second; @@ -2049,6 +2010,14 @@ int main(int argc, char** argv) { cam_params[it].SetDistortion(distortions[it]); } + // The nav cam did not get optimized. Go back to the solution with + // two focal lengths, rather than the one with one focal length + // solved by this solver (as the average of the two). The two focal + // lengths are very similar, but it is not worth modifying the + // camera model we don't plan to optimize. 
+ if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) + cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; + // Copy back the optimized extrinsics for (int cam_type = 0; cam_type < num_cam_types; cam_type++) dense_map::array_to_rigid_transform @@ -2061,89 +2030,26 @@ int main(int argc, char** argv) { (depth_to_image_noscale[cam_type], &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - // Copy extrinsics to specific cameras - hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); - scicam_to_hazcam_aff_trans = hazcam_to_navcam_aff_trans.inverse() * ref_to_cam_trans[2].inverse(); - - // Copy intrinsics to the specific cameras - if (FLAGS_nav_cam_intrinsics_to_float != "") nav_cam_params = cam_params[0]; - if (FLAGS_haz_cam_intrinsics_to_float != "") haz_cam_params = cam_params[1]; - if (FLAGS_sci_cam_intrinsics_to_float != "") sci_cam_params = cam_params[2]; - - // Copy the offsets to specific cameras - std::cout.precision(18); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - std::cout << "--offset after " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; - - navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; - scicam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1] - - ref_to_cam_timestamp_offsets[2]; - - std::cout.precision(18); - std::cout << "--navcam_to_hazcam_timestamp_offset " << navcam_to_hazcam_timestamp_offset << std::endl; - std::cout << "--scicam_to_hazcam_timestamp_offset " << scicam_to_hazcam_timestamp_offset << std::endl; - // Update the optimized depth to image (for haz cam only) int cam_type = 1; // haz cam - hazcam_depth_to_image_scale = depth_to_image_scales[cam_type]; - hazcam_depth_to_image_transform = depth_to_image_noscale[cam_type]; - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + haz_cam_depth_to_image_scale = depth_to_image_scales[cam_type]; + haz_cam_depth_to_image_transform = 
depth_to_image_noscale[cam_type]; + haz_cam_depth_to_image_transform.linear() *= haz_cam_depth_to_image_scale; - // Update the config file - { - // update nav and haz - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "nav_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(nav_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - hazcam_to_navcam_aff_trans, update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - navcam_to_hazcam_timestamp_offset); - } - { - // update sci and haz cams - // TODO(oalexan1): Write a single function to update all 3 of them - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "sci_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(sci_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - scicam_to_hazcam_aff_trans.inverse(), - update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - scicam_to_hazcam_timestamp_offset); - } + 
dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", + cam_params, ref_to_cam_trans, + ref_to_cam_timestamp_offsets, + haz_cam_depth_to_image_transform); - std::cout << "Writing: " << FLAGS_output_map << std::endl; if (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "") { std::cout << "Either the sparse map intrinsics or cameras got modified. " << "The map must be rebuilt." << std::endl; dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data - nav_cam_params, sparse_map); + cam_params[ref_cam_type], sparse_map); } + std::cout << "Writing: " << FLAGS_output_map << std::endl; sparse_map->Save(FLAGS_output_map); return 0; diff --git a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py index 54006b5d..06881ffd 100644 --- a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py +++ b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py @@ -40,7 +40,9 @@ help="The directory containing the undistorted images.", ) parser.add_argument( - "--camera_type", default="", help="The camera type (nav_cam, haz_cam, or sci_cam)." 
+ "--camera_type", + default="", + help="The camera type (nav_cam, haz_cam, or sci_cam, etc.).", ) args = parser.parse_args() @@ -51,14 +53,6 @@ ) sys.exit(1) -if ( - args.camera_type != "nav_cam" - and args.camera_type != "haz_cam" - and args.camera_type != "sci_cam" -): - print("The camera type must be nav_cam, haz_cam, or sci_cam") - sys.exit(1) - # Read the intrinsics intr_file = args.undistorted_image_dir + "/undistorted_intrinsics.txt" if not os.path.exists(intr_file): diff --git a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc index ab4bc5ca..deb954bf 100644 --- a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc +++ b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc @@ -20,22 +20,17 @@ #include #include +#include + #include #include -#include - -// ROS -#include #include #include -#include -#include - #include + #include #include -#include #include #include @@ -46,9 +41,9 @@ #include #include -// Extract images from a ROS bag. +// Extract clouds a ROS bag. 
-DEFINE_string(topic, "/hw/cam_nav", "Image topic that you want to write to disk."); +DEFINE_string(topic, "/hw/depth_haz/points", "The topic having the clouds."); DEFINE_string(output_directory, "", "Directory for writing the output imagery."); DEFINE_string(output_format, "%06i", "Format string for writing the output data."); DEFINE_double(start, 0, "Start extracting this many seconds into the bag."); @@ -85,7 +80,7 @@ int main(int argc, char** argv) { if (output_directory.empty()) { char* temp = strdup(argv[1]); std::string base = std::string(basename(temp)); - output_directory = base.substr(0, base.length() - 4) + "_images"; + output_directory = base.substr(0, base.length() - 4) + "_clouds"; } // Create the output directory, if missing @@ -99,70 +94,6 @@ int main(int argc, char** argv) { rosbag::View view(bag, rosbag::TopicQuery(topics)); std::cout << "Copying at most " << view.size() << " frames from the bag file.\n"; for (rosbag::MessageInstance const m : view) { - // Extract a regular image - sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); - if (image_msg) { - ros::Time stamp = image_msg->header.stamp; - curr_time = stamp.toSec(); - - // Set up the output filename - form_filename(image_msg->header.seq, curr_time, filename_buffer, sizeof(filename_buffer)); - std::string name(filename_buffer); - name = output_directory + "/" + name + ".jpg"; - - if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { - continue; - } - - // Convert to an opencv image - cv::Mat image; - try { - // Do a copy as the message is temporary - (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); - } catch (cv_bridge::Exception const& e) { - try { - (cv_bridge::toCvShare(image_msg, "32FC1")->image).copyTo(image); - } catch (cv_bridge::Exception const& e) { - LOG(ERROR) << "Unable to convert " << image_msg->encoding.c_str() << " image to bgr8 or 32FC1"; - continue; - } - } - - 
std::cout << "Writing: " << name << "\n"; - cv::imwrite(name, image); - } - - // Extract a compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg - = m.instantiate(); - if (comp_image_msg) { - ros::Time stamp = comp_image_msg->header.stamp; - curr_time = stamp.toSec(); - - // Set up the output filename - form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, - sizeof(filename_buffer)); - std::string name(filename_buffer); - name = output_directory + "/" + name + ".jpg"; - - if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { - continue; - } - - cv::Mat image; - try { - // convert compressed image data to cv::Mat - image = cv::imdecode(cv::Mat(comp_image_msg->data), cv::IMREAD_COLOR); - } catch (cv_bridge::Exception const& e) { - LOG(ERROR) << "Unable to convert compressed image to bgr8."; - continue; - } - std::cout << "Writing: " << name << "\n"; - cv::imwrite(name, image); - } - sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); if (pc_msg) { ros::Time stamp = pc_msg->header.stamp; @@ -174,12 +105,13 @@ int main(int argc, char** argv) { name = output_directory + "/" + name + ".ply"; if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { + if (curr_time - beg_time < FLAGS_start || + curr_time - beg_time > FLAGS_start + FLAGS_duration) { continue; } pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); + dense_map::msgToPcl(pc_msg, pc); if (static_cast(pc.points.size()) != static_cast(pc_msg->width * pc_msg->height)) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 0836a161..bff83728 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ 
-28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -61,14 +60,15 @@ #include #include -// Read from the bag in intrinsics information for the simulated sci cam and save it to disk -void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output_dir) { +// Read from the bag in intrinsics information for the given simulated camera save it to disk +void saveSimCamIntrinsics(std::string const& bag_file, std::string cam_type, + std::string const& output_dir) { int image_width, image_height; double focal_length, optical_center_x, optical_center_y; std::vector topics; - std::string topic_sci_cam_sim_info = std::string("/") + TOPIC_SCI_CAM_SIM_INFO; - topics.push_back(topic_sci_cam_sim_info); + std::string info_topic = "/sim/" + cam_type + "/info"; + topics.push_back(info_topic); bool success = false; rosbag::Bag bag; @@ -90,7 +90,8 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output } if (!success) { - LOG(FATAL) << "Could not read sci cam intrinsics from: " << bag_file << " on topic: " << topic_sci_cam_sim_info; + LOG(FATAL) << "Could not read intrinsics for simulated camera " << cam_type + << " from bag: " << bag_file << " on topic: " << info_topic << "\n"; return; } @@ -103,8 +104,8 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output } ofs.precision(17); ofs << "# Unidistored width and height, focal length, undistorted optical center\n"; - ofs << image_width << ' ' << image_height << ' ' << focal_length << ' ' << optical_center_x << ' ' << optical_center_y - << "\n"; + ofs << image_width << ' ' << image_height << ' ' << focal_length << ' ' + << optical_center_x << ' ' << optical_center_y << "\n"; ofs.close(); } @@ -112,9 +113,10 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output // around that timestamp. 
Note that we use the counter sparse_id to // travel forward in time through the sparse map upon repeated // invocations of this function to narrow down the search. -bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector const& sparse_map_timestamps, - int& sparse_map_id, // gets modified - std::vector& nearby_cid) { +bool findNearbySparseMapImages(double desired_nav_cam_time, + std::vector const& sparse_map_timestamps, + int & sparse_map_id, // gets modified + std::vector & nearby_cid) { int num_cid = sparse_map_timestamps.size(); if (num_cid < 2) LOG(FATAL) << "Must have a map with at least two images."; @@ -125,7 +127,8 @@ bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector // using the SURF map with the map made up of the same images that // we want to texture) localization may become unreliable. bool within_bounds = - (sparse_map_timestamps[0] <= desired_nav_cam_time && desired_nav_cam_time <= sparse_map_timestamps[num_cid - 1]); + (sparse_map_timestamps[0] <= desired_nav_cam_time && + desired_nav_cam_time <= sparse_map_timestamps[num_cid - 1]); if (!within_bounds) return false; // Try to find this many images in the map around the time in desired_nav_cam_time. @@ -172,14 +175,16 @@ bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector } // Given a timestamp of a desired camera (haz_cam or sci_cam) and the -// list of navcam image timestamps, find the nearest navcam images in +// list of nav_cam image timestamps, find the nearest nav_cam images in // time to the desired timestamp, localize from the sparse map their // poses, then interpolate and transform to find the pose of the // desired camera. 
-bool findInterpolatedPose(double desired_nav_cam_time, std::vector const& nav_cam_bag_timestamps, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, +bool findInterpolatedPose(double desired_nav_cam_time, + std::vector const& nav_cam_bag_timestamps, + Eigen::Affine3d const& nav_cam_to_desired_cam_trans, std::vector const& nav_cam_msgs, - boost::shared_ptr sparse_map, bool use_brisk_map, + boost::shared_ptr sparse_map, + bool use_brisk_map, std::vector const& sparse_map_timestamps, Eigen::MatrixXd& desired_cam_to_world_trans, int& sparse_map_id, int& nav_cam_bag_id, int& nav_cam_pos) { @@ -219,7 +224,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const camera::CameraModel beg_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); - if (!sparse_map->Localize(beg_image, &beg_localized_cam, NULL, NULL, nearby_cid_ptr)) return false; + if (!sparse_map->Localize(beg_image, &beg_localized_cam, NULL, NULL, nearby_cid_ptr)) + return false; Eigen::Affine3d beg_trans = beg_localized_cam.GetTransform(); // Note that below we use a throw-away value for nav_cam_pos. 
@@ -247,7 +253,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const Eigen::Affine3d interp_trans = dense_map::linearInterp(alpha, beg_trans, end_trans); - desired_cam_to_world_trans = interp_trans.inverse().matrix() * desired_cam_to_nav_cam_transform; + desired_cam_to_world_trans + = interp_trans.inverse().matrix() * nav_cam_to_desired_cam_trans.matrix().inverse(); return true; } @@ -696,8 +703,8 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Z // Also handle the case when this angle may be invalid double cos_angle = new_dir.dot(curr_dir) / (curr_dir.norm() * new_dir.norm()); double angle = (180.0 / M_PI) * acos(cos_angle); - if (angle < foreshortening_delta || 180.0 - angle < foreshortening_delta || std::isnan(angle) || - std::isinf(angle)) { + if (angle < foreshortening_delta || 180.0 - angle < foreshortening_delta || + std::isnan(angle) || std::isinf(angle)) { will_accept = false; break; } @@ -783,7 +790,7 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa // Add weights which are higher at image center and decrease towards // boundary. This makes voxblox blend better. 
-void calc_weights(cv::Mat& depthMat, double exponent) { +void calc_weights(cv::Mat& depthMat, double exponent, bool simulated_data) { for (int row = 0; row < depthMat.rows; row++) { #pragma omp parallel for for (int col = 0; col < depthMat.cols; col++) { @@ -793,28 +800,35 @@ void calc_weights(cv::Mat& depthMat, double exponent) { double dcol = col - depthMat.cols / 2.0; double dist_sq = drow * drow + dcol * dcol; - // Some kind of function decaying from center - float weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); + // For sim data use a weight of 1.0 for each point + float weight = 1.0; + if (!simulated_data) { + // Some kind of function decaying from center + weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); - // Also, points that are further in z are given less weight - double z = depthMat.at(row, col)[2]; - weight *= 1.0 / (0.001 + z * z); + // Also, points that are further in z are given less weight + double z = depthMat.at(row, col)[2]; + weight *= 1.0 / (0.001 + z * z); - // Make sure the weight does not get too small as voxblox - // will read it as a float. Here making the weights - // just a little bigger than what voxblox will accept. - weight = std::max(weight, static_cast(1.1e-6)); + // Make sure the weight does not get too small as voxblox + // will read it as a float. Here making the weights + // just a little bigger than what voxblox will accept. 
+ weight = std::max(weight, static_cast(1.1e-6)); + } depthMat.at(row, col)[4] = weight; } } } -void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat const& haz_cam_intensity, - Eigen::Affine3d const& hazcam_depth_to_image_transform, int depth_exclude_columns, - int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, std::vector const& median_filter_params, - bool save_debug_data, const char* filename_buffer, +void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, + cv::Mat const& haz_cam_intensity, + Eigen::Affine3d const& haz_cam_depth_to_image_transform, + bool simulated_data, int depth_exclude_columns, int depth_exclude_rows, + double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, + std::vector const& median_filter_params, bool save_debug_data, + const char* filename_buffer, Eigen::MatrixXd const& desired_cam_to_world_trans) { // Sanity checks if (static_cast(haz_cam_intensity.cols) != static_cast(pc_msg->width) || @@ -825,7 +839,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Extract the depth point cloud from the message pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); + dense_map::msgToPcl(pc_msg, pc); if (static_cast(pc.points.size()) != static_cast(pc_msg->width * pc_msg->height)) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; @@ -851,7 +865,6 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Pick first channel. The precise color of the depth cloud is not important. // It will be wiped later during smoothing and hole-filling anyway. 
double i = haz_cam_intensity.at(row, col)[0]; - bool skip = (col < depth_exclude_columns || depthMat.cols - col < depth_exclude_columns || row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); @@ -865,9 +878,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Throw out as outliers points whose x, y, or z values differs // from the median of such values by more than given distance. // This can be a slow operation for big windows. - int num_filter_passes = median_filter_params.size() / 2; - for (int pass = 0; pass < num_filter_passes; pass++) - median_filter(depthMat, workMat, Zero, median_filter_params[2 * pass + 0], median_filter_params[2 * pass + 1]); + if (!simulated_data) { + int num_filter_passes = median_filter_params.size() / 2; + for (int pass = 0; pass < num_filter_passes; pass++) + median_filter(depthMat, workMat, Zero, median_filter_params[2 * pass + 0], + median_filter_params[2 * pass + 1]); + } // Make a copy of the current depth image before filling holes cv::Mat origMat; @@ -878,20 +894,23 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Use a smaller radius for the foreshortening constraint int radius2 = depth_hole_fill_diameter / 4.0; double sigma = 0.5; // gaussian sigma used for hole-filling and smoothing - if (depth_hole_fill_diameter > 0) hole_fill(depthMat, workMat, sigma, Zero, radius1, radius2, foreshortening_delta); + if (depth_hole_fill_diameter > 0 && !simulated_data) + hole_fill(depthMat, workMat, sigma, Zero, radius1, radius2, foreshortening_delta); // Smooth newly added points and also their immediate neighbors. // Use origMat for reference. 
int radius = depth_hole_fill_diameter / 4.0; - if (depth_hole_fill_diameter > 0) smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); + if (depth_hole_fill_diameter > 0 && !simulated_data) + smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); // This is the right place at which to add weights, before adding // extra points messes up with the number of rows in in depthMat. - calc_weights(depthMat, reliability_weight_exponent); + calc_weights(depthMat, reliability_weight_exponent, simulated_data); // Add extra points, but only if we are committed to manipulating the // depth cloud to start with - if (depth_hole_fill_diameter > 0) add_extra_pts(depthMat, workMat, Zero); + if (depth_hole_fill_diameter > 0 && !simulated_data) + add_extra_pts(depthMat, workMat, Zero); if (save_debug_data) { // Save the updated depthMat as a mesh (for debugging) @@ -926,7 +945,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co } Eigen::Vector3d X(pt[0], pt[1], pt[2]); - X = hazcam_depth_to_image_transform * X; + X = haz_cam_depth_to_image_transform * X; pci.points[count].x = X[0]; pci.points[count].y = X[1]; pci.points[count].z = X[2]; @@ -1002,7 +1021,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Apply an affine transform Eigen::Vector3d X(pt[0], pt[1], pt[2]); - X = hazcam_depth_to_image_transform * X; + X = haz_cam_depth_to_image_transform * X; // Apply a 4x4 rotation + translation transform Eigen::VectorXd V(4); @@ -1019,117 +1038,6 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co } } -// This is an instructive code to add to saveCameraPoses() -// to compare the sci cam pose as found via localizing -// the nav cam images vs localizing the sci cam image directly. -// The conclusion is that the sci cam calibration needs -// improvement, as the results are all over the place. 
-#if 0 -void localize_sci_cam_directly() { - std::cout.precision(8); - while (cam_type == "sci_cam" && !use_brisk_map) { - // Find nav cam images around the nav cam pose to compute - std::vector nearby_cid; - double desired_nav_cam_time = curr_time + desired_cam_to_nav_cam_offset; - int local_sparse_map_id = 0; - if (!findNearbySparseMapImages(desired_nav_cam_time, - sparse_map_timestamps, - local_sparse_map_id, // gets modified - nearby_cid)) { - std::cout << "Fail to find nearby cid!" << std::endl; - break; - } - std::cout << "Pose from using nav cam\n" << desired_cam_to_world_trans << std::endl; - - cv::Mat curr_sci_cam_image; - bool curr_save_grayscale = true; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, curr_save_grayscale, - curr_sci_cam_image, desired_cam_pos, found_time)) { - std::cout << "Failed to look up image!" << std::endl; - break; - } else { - std::cout << "Success in finding sci cam image!" << std::endl; - } - - snprintf(filename_buffer, sizeof(filename_buffer), - "%s/%10.7f_grayscale.jpg", desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, curr_sci_cam_image); - - double scale = 0.25; - std::cout << "resizing with scale " << scale << std::endl; - cv::Mat resized_image; - cv::resize(curr_sci_cam_image, resized_image, cv::Size(), scale, scale, cv::INTER_AREA); - - std::cout << "Resized image dims: " << resized_image.cols << ' ' << resized_image.rows << "\n"; - - camera::CameraModel curr_sci_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), - g_sci_cam_params); - if (!sparse_map->Localize(resized_image, &curr_sci_cam, NULL, NULL, &nearby_cid)) { - std::cout << "Failed to localize!" << std::endl; - break; - } else { - std::cout << "Success localizing!" 
<< std::endl; - } - - Eigen::Affine3d curr_trans = curr_sci_cam.GetTransform(); - Eigen::MatrixXd T = (curr_trans.inverse()).matrix(); - std::cout << "sci cam mat\n" << T << std::endl; - - Eigen::Vector3d P(T(0, 3), T(1, 3), T(2, 3)); - std::cout << "Position " << P.transpose() << std::endl; - - double best_dist = 1e+100; - double best_delta = -1.0; - Eigen::MatrixXd best_s2n; - - for (double delta = -0.5; delta <= 0.5; delta += 0.05) { - std::cout << "delta is " << delta << std::endl; - - int local_sparse_map_id = 0; - int local_nav_cam_bag_id = 0; - int local_nav_cam_pos = 0; - Eigen::MatrixXd S; - - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset + delta, - nav_cam_bag_timestamps, desired_cam_to_nav_cam_transform, - nav_cam_msgs, sparse_map, use_brisk_map, - sparse_map_timestamps, - S, - local_sparse_map_id, - local_nav_cam_bag_id, - local_nav_cam_pos)) { - std::cout << "Failed at local interp" << std::endl; - continue; - } - std::cout << "Local trans for delta:\n" << S << std::endl; - Eigen::Vector3d Q(S(0, 3), S(1, 3), S(2, 3)); - double local_dist = (Q-P).norm(); - std::cout << "delta position and norm " - << delta << ' ' << Q.transpose() << ' ' << local_dist << std::endl; - - Eigen::MatrixXd s2n = desired_cam_to_nav_cam_transform * (S.inverse()) * T; - - if (local_dist < best_dist) { - best_dist = local_dist; - best_delta = delta; - best_s2n = s2n; - } - - if (std::abs(delta) < 1e-10) { - std::cout << "best s2n1\n" << s2n << std::endl; - std::cout << "actual s2n\n" << desired_cam_to_nav_cam_transform << std::endl; - } - } - - std::cout << "best delta and dist " << best_delta << ' ' << best_dist << std::endl; - std::cout << "best s2n2\n" << best_s2n << std::endl; - break; - } -} -#endif - // Given a desired camera and a set of acquisition timestamps for it, // find the nav cam images nearest in time bracketing each timestamp, // find the poses of those images using localization, interpolate the @@ -1140,29 +1048,23 @@ void 
localize_sci_cam_directly() { // Also write the extracted point clouds and the desired cam (nav or sci) images. -void saveCameraPoses(bool simulated_data, std::string const& cam_type, - double desired_cam_to_nav_cam_offset, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, - Eigen::Affine3d& hazcam_depth_to_image_transform, - int depth_exclude_columns, int depth_exclude_rows, - double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, - std::vector const& median_filter_params, - bool save_debug_data, - std::vector const& nav_cam_msgs, - std::vector const& sci_cam_msgs, - std::vector const& haz_cam_points_msgs, - std::vector const& haz_cam_intensity_msgs, - std::vector const& nav_cam_bag_timestamps, - std::map> const& sci_cam_exif, - std::string const& output_dir, std::string const& desired_cam_dir, - double start, double duration, - double sampling_spacing_seconds, - double dist_between_processed_cams, std::set const& sci_cam_timestamps, - double max_iso_times_exposure, - boost::shared_ptr sparse_map, bool use_brisk_map, - dense_map::StampedPoseStorage const& sim_desired_cam_poses, - std::string const& external_mesh) { +void saveCameraPoses( + bool simulated_data, std::string const& cam_type, double nav_to_desired_cam_offset, + Eigen::Affine3d const& nav_cam_to_desired_cam_trans, + Eigen::Affine3d& haz_cam_depth_to_image_transform, int depth_exclude_columns, + int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, std::vector const& median_filter_params, + std::vector const& desired_cam_msgs, + std::vector const& nav_cam_msgs, // always needed when not doing sim + std::vector const& haz_cam_points_msgs, + std::vector const& nav_cam_bag_timestamps, + std::map> const& sci_cam_exif, std::string const& output_dir, + std::string const& desired_cam_dir, double start, double duration, + double sampling_spacing_seconds, double dist_between_processed_cams, 
+ std::set const& sci_cam_timestamps, double max_iso_times_exposure, + boost::shared_ptr sparse_map, bool use_brisk_map, + bool do_haz_cam_image, + dense_map::StampedPoseStorage const& sim_desired_cam_poses, bool save_debug_data) { double min_map_timestamp = std::numeric_limits::max(); double max_map_timestamp = -min_map_timestamp; std::vector sparse_map_timestamps; @@ -1178,7 +1080,9 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, } } - // Open a handle for writing the index of files being written to disk + // Open a handle for writing the index of files being written to + // disk. This is not done if we only want to process the haz cam + // cloud without the haz_cam image. std::string index_file = output_dir + "/" + cam_type + "_index.txt"; std::ofstream ofs(index_file.c_str()); @@ -1202,47 +1106,35 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, // This is used to ensure cameras are not too close Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); - // Decide on which messages to process - std::vector const* desired_cam_msgs = NULL; - if (cam_type == "haz_cam") { - desired_cam_msgs = &haz_cam_points_msgs; // points topic - } else if (cam_type == "sci_cam") { - desired_cam_msgs = &sci_cam_msgs; // image topic - } else if (cam_type == "nav_cam") { - desired_cam_msgs = &nav_cam_msgs; // image topic - } else { - LOG(FATAL) << "Unknown camera type: " << cam_type; - } - - if (external_mesh == "") { - // Compute the starting bag time as the timestamp of the first cloud - for (auto& m : haz_cam_points_msgs) { - if (beg_time < 0) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - pc_msg = m.instantiate(); - if (pc_msg) { - beg_time = pc_msg->header.stamp.toSec(); - break; - } + // Compute the starting bag time as the timestamp of the first cloud + for (auto& m : haz_cam_points_msgs) { + if (beg_time < 0) { + sensor_msgs::PointCloud2::ConstPtr pc_msg; + pc_msg = m.instantiate(); + if (pc_msg) { + beg_time 
= pc_msg->header.stamp.toSec(); + break; } } - } else { - // Use an external mesh, so ignore the depth clouds (may even be absent) - beg_time = min_map_timestamp; } - for (auto& m : *desired_cam_msgs) { + std::vector const* iter_msgs = NULL; + if (cam_type == "haz_cam") + iter_msgs = &haz_cam_points_msgs; // iterate over the point clouds + else + iter_msgs = &desired_cam_msgs; // iterate over desired images + + for (auto& m : *iter_msgs) { sensor_msgs::PointCloud2::ConstPtr pc_msg; double curr_time = -1.0; if (cam_type == "haz_cam") { - // haz cam + // Find the depth cloud and its time pc_msg = m.instantiate(); if (!pc_msg) continue; curr_time = pc_msg->header.stamp.toSec(); } else { - // sci cam or nav cam - // Check for image + // Find the image and its time sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); if (image_msg) { curr_time = image_msg->header.stamp.toSec(); @@ -1259,10 +1151,12 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, } } - bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); - if (!simulated_data && !use_brisk_map && !custom_timestamps) { - if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts - if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends + bool custom_sci_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); + if (!simulated_data && !use_brisk_map && !custom_sci_timestamps) { + // Do not process data before the sparse map starts + if (curr_time < min_map_timestamp) continue; + // Do not process data after the sparse map ends + if (curr_time > max_map_timestamp) break; } if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time @@ -1275,26 +1169,28 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, Eigen::MatrixXd desired_cam_to_world_trans; // Let some time elapse between fusing clouds - if (std::abs(curr_time - prev_time) < 
sampling_spacing_seconds && !custom_timestamps) { + if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_sci_timestamps) { continue; } // The case of custom timestamps - if (custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; + if (custom_sci_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) + continue; if (!simulated_data) { // Do localization based on nav cam, transform the pose to desired cam coordinates, etc - // Note that we convert from the desired time to the nav cam time for this operation - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, - desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, + // Note that we convert from the desired cam time to nav cam time for this operation + if (!findInterpolatedPose(curr_time - nav_to_desired_cam_offset, + nav_cam_bag_timestamps, + nav_cam_to_desired_cam_trans, nav_cam_msgs, sparse_map, use_brisk_map, sparse_map_timestamps, desired_cam_to_world_trans, sparse_map_id, nav_cam_bag_id, nav_cam_pos)) { - std::cout.precision(17); - std::cout << "Localization failed at time " << curr_time << std::endl; - std::cout << "if too many such failures, consider modifying the value of " - << "--localization_options. See the documentation for more info." << std::endl; + std::cout << std::setprecision(17) + << "Localization failed at time " << curr_time << ". " + << "If too many such failures, consider modifying the value of " + << "--localization_options. 
See the documentation for more info.\n"; continue; } } else { @@ -1318,7 +1214,7 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, curr_trans.matrix() = desired_cam_to_world_trans; Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); - if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && + if (!std::isnan(prev_cam_ctr[0]) && !custom_sci_timestamps && curr_dist_bw_cams < dist_between_processed_cams) { std::cout << "Distance to previously processed camera is " << curr_dist_bw_cams << ", skipping it." @@ -1329,91 +1225,68 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, char filename_buffer[1000]; std::string suffix = cam_type + "_to_world"; - if (cam_type == "haz_cam") { - cv::Mat haz_cam_intensity; - bool save_grayscale = false; // Get a color image, if possible - - // Normally try to look up the image time right after the current cloud time - double desired_image_time = curr_time; - double found_time = -1.0; - if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, - save_grayscale, haz_cam_intensity, + // Lookup and save the image at the current time or soon after it (the latter + // case happens when looking up the image based on the cloud time) + cv::Mat desired_image; + bool save_grayscale = false; // Get a color image, if possible + double found_time = -1.0; + if (cam_type != "haz_cam" || do_haz_cam_image) { + if (!dense_map::lookupImage(curr_time, desired_cam_msgs, save_grayscale, desired_image, desired_cam_pos, found_time)) - continue; + continue; // the expected image could not be found + } else { + // We have cam_type == haz_cam but no haz_cam image texturing + // desired. Create a fake image, only for the purpose of saving + // the point cloud, for which an image is needed. + desired_image = cv::Mat(pc_msg->height, pc_msg->width, CV_8UC3, cv::Scalar(255, 255, 255)); + } + + // Apply an optional scale. 
Use a pointer to not copy the data more than + // one has to. + cv::Mat* img_ptr = &desired_image; + cv::Mat scaled_image; + if (!simulated_data && cam_type == "sci_cam") { + auto exif_it = sci_cam_exif.find(curr_time); + if (exif_it != sci_cam_exif.end()) { + std::vector const& exif = exif_it->second; + double iso = exif[dense_map::ISO]; + double exposure = exif[dense_map::EXPOSURE_TIME]; + dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, + *img_ptr, scaled_image); + img_ptr = &scaled_image; + } + } - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), - curr_time, suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, *img_ptr); - // Save the transform name in the index file - depth_ofs << filename_buffer << "\n"; // for the depth + // Save the name of the image in the index + ofs << filename_buffer << "\n"; + + // Save the transform + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", + output_dir.c_str(), curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + if (cam_type == "haz_cam") { + // Save the transform name in the index file, for voxblox + depth_ofs << filename_buffer << "\n"; // Append the intensity to the point cloud and save it snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", output_dir.c_str(), curr_time); - save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, - depth_exclude_columns, - depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, + save_proc_depth_cloud(pc_msg, desired_image, haz_cam_depth_to_image_transform, + simulated_data, + depth_exclude_columns, 
depth_exclude_rows, + foreshortening_delta, depth_hole_fill_diameter, reliability_weight_exponent, median_filter_params, save_debug_data, filename_buffer, desired_cam_to_world_trans); // Save the name of the point cloud to the index depth_ofs << filename_buffer << "\n"; - - // Save the image - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", - desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, haz_cam_intensity); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; - - } else { - // Save the nav or sci cam image at the given timestamp. - cv::Mat desired_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, - desired_cam_image, desired_cam_pos, found_time)) - continue; - - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", - output_dir.c_str(), curr_time, suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); - - // This is very useful for debugging things with ASP. - - // A debug utility for saving a camera in format ASP understands - // if (cam_type == "sci_cam") - // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); - - // Apply an optional scale. Use a pointer to not copy the data more than - // one has to. 
- cv::Mat* img_ptr = &desired_cam_image; - cv::Mat scaled_image; - if (!simulated_data && cam_type == "sci_cam") { - auto exif_it = sci_cam_exif.find(curr_time); - if (exif_it != sci_cam_exif.end()) { - std::vector const& exif = exif_it->second; - double iso = exif[dense_map::ISO]; - double exposure = exif[dense_map::EXPOSURE_TIME]; - dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, - *img_ptr, scaled_image); - img_ptr = &scaled_image; - } - } - - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", - desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, *img_ptr); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; } // Update the previous camera center @@ -1423,9 +1296,10 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, std::cout << "Wrote: " << index_file << std::endl; } -void save_navcam_poses_and_images(boost::shared_ptr sparse_map, +void save_nav_cam_poses_and_images(boost::shared_ptr sparse_map, std::vector const& nav_cam_msgs, - std::string const& output_dir, std::string const& nav_cam_dir) { + std::string const& output_dir, + std::string const& nav_cam_dir) { // Keep track of where in the bag we are int nav_cam_pos = 0; @@ -1443,7 +1317,8 @@ void save_navcam_poses_and_images(boost::shared_ptr s std::string ext = ff_common::file_extension(img_file); std::string cam_file = ff_common::ReplaceInStr(img_file, "." 
+ ext, "_nav_cam_to_world.txt"); cam_file = output_dir + "/" + ff_common::basename(cam_file); - if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; + if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " + << img_file; dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); @@ -1454,7 +1329,8 @@ void save_navcam_poses_and_images(boost::shared_ptr s nav_cam_pos, found_time)) continue; - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + nav_cam_dir.c_str(), timestamp); std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, nav_cam_image); @@ -1465,15 +1341,15 @@ void save_navcam_poses_and_images(boost::shared_ptr s DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); -DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); -DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); -DEFINE_string(camera_type, "all", - "Specify which cameras to process. By default, process all. 
Options: " - "nav_cam, sci_cam."); +DEFINE_string(camera_types, "sci_cam nav_cam haz_cam", + "Specify the cameras to use for the textures, as a list in quotes."); +DEFINE_string(camera_topics, "/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record " + "/hw/depth_haz/extended/amplitude_int", + "Specify the bag topics for the cameras to texture (in the same order as in " + "--camera_types). Use a list in quotes."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); DEFINE_double(sampling_spacing_seconds, 0.5, @@ -1520,9 +1396,8 @@ DEFINE_bool(use_brisk_map, false, "If specified, instead of a SURF sparse map made from the same bag that needs " "texturing, use a pre-existing and unrelated BRISK map. This map may be more " "convenient but less reliable."); -DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); -DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " +DEFINE_string(nav_cam_to_sci_cam_offset_override_value, "", + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging."); @@ -1532,9 +1407,11 @@ int main(int argc, char** argv) { if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; - if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; + if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) + LOG(FATAL) << "The sparse map was not specified."; - if 
(!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; + if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " + << FLAGS_ros_bag; if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map; @@ -1542,38 +1419,82 @@ int main(int argc, char** argv) { // Parse the median filter params double val; std::vector median_filter_params; - std::istringstream ifs(FLAGS_median_filters); - while (ifs >> val) median_filter_params.push_back(val); - + std::istringstream ifsm(FLAGS_median_filters); + while (ifsm >> val) median_filter_params.push_back(val); if (median_filter_params.size() % 2 != 0) LOG(FATAL) << "There must exist an even number of median filter parameters, " << "being pairs of window sizes and distances."; - // Need high precision to print timestamps - std::cout.precision(17); + // Parse the camera types + std::vector cam_types; + std::string str; + std::istringstream ifsc(FLAGS_camera_types); + bool do_haz_cam_image = false, do_nav_cam = false; + while (ifsc >> str) { + cam_types.push_back(str); + if (str == "haz_cam") do_haz_cam_image = true; + if (str == "nav_cam") do_nav_cam = true; + } + if (!FLAGS_simulated_data && !do_nav_cam) + LOG(FATAL) << "Nav cam must be in --camera_topics (unless using simulated data) as it is " + << "needed for camera localization.\n"; + if (FLAGS_simulated_data && do_nav_cam) + LOG(FATAL) << "The geometry mapper does not support nav_cam with simulated data as " + << "its distortion is not modeled.\n"; + if (FLAGS_simulated_data && !do_haz_cam_image) + LOG(FATAL) << "The haz_cam must be one of the camera types in simulation mode " + << "as it is needed to read the simulated camera pose in order to " + << "process the depth clouds."; + + // Parse the camera topics + std::map cam_topics; + std::istringstream ifst(FLAGS_camera_topics); + while (ifst >> str) { + size_t count = 
cam_topics.size(); + if (count >= cam_types.size()) + LOG(FATAL) << "There must be a topic for each camera type.\n"; + cam_topics[cam_types[count]] = str; + std::cout << "Camera topic for " << cam_types[count] << ": " << cam_topics[cam_types[count]] + << "\n"; + } + if (cam_types.size() != cam_topics.size()) + LOG(FATAL) << "There must be a topic for each camera type.\n"; + + if (!FLAGS_simulated_data && !do_haz_cam_image) { + // Even if it is not wanted to process the haz cam image, need to + // have the haz cam topic to be able to process the cloud. + cam_types.push_back("haz_cam"); + // The topic below won't be used, but haz to be in for the + // bookkeeping to be correct. + cam_topics["haz_cam"] = "/hw/depth_haz/extended/amplitude_int"; + } // Read simulated poses - dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; + std::vector sim_cam_poses(cam_types.size()); if (FLAGS_simulated_data) { - std::string haz_cam_pose_topic = std::string("/") + TOPIC_HAZ_CAM_SIM_POSE; - std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; - std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; - std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; - dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); - dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); + for (size_t it = 0; it < cam_types.size(); it++) { + std::string pose_topic = "/sim/" + cam_types[it] + "/pose"; + std::cout << "Pose topic for simulated camera: " << cam_types[it] << ": " + << pose_topic << "\n"; + dense_map::readBagPoses(FLAGS_ros_bag, pose_topic, sim_cam_poses[it]); + } } // Set up handles for reading data at given time stamp without - // searching through the whole bag each time. - dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); + // searching through the whole bag each time. 
Must use pointers + // due to the rosbag API. + std::map> bag_handles; + for (size_t it = 0; it < cam_types.size(); it++) { + bag_handles[cam_types[it]] = boost::shared_ptr + (new dense_map::RosBagHandle(FLAGS_ros_bag, cam_topics[cam_types[it]])); + } + dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); std::vector nav_cam_bag_timestamps; if (!FLAGS_simulated_data) - dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); + dense_map::readBagImageTimestamps(FLAGS_ros_bag, cam_topics["nav_cam"], nav_cam_bag_timestamps); std::map> sci_cam_exif; if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif); @@ -1584,53 +1505,45 @@ int main(int argc, char** argv) { std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " << FLAGS_depth_exclude_rows << " rows at the margins of the depth point clouds.\n"; - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), 
Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + std::vector cam_params; + std::vector nav_to_cam_trans; + std::vector nav_to_cam_timestamp_offset; + Eigen::Affine3d nav_cam_to_body_trans; // Will not be used + Eigen::Affine3d haz_cam_depth_to_image_transform; if (!FLAGS_simulated_data) { - // Read a bunch of transforms from the robot calibration file. - // The empirically determined offset between the measured timestamps - // of the cameras, in seconds. There are two reasons for this time offset: - // (1) The nav cam and sci cam are acquired on different processors. - // (2) The actual moment the camera image is recorded is not the same - // moment that image finished processing and the timestamp is set. - // This delay is worse for the sci cam which takes longer to acquire. - // TODO(oalexan1): camera_calibrator must actually solve for them. - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { - double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; + dense_map::readConfigFile( // Inputs + cam_types, + "nav_cam_transform", // this is the nav cam to body transform + "haz_cam_depth_to_image_transform", + // Outputs + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offset, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + + if 
(FLAGS_nav_cam_to_sci_cam_offset_override_value != "") { + for (size_t it = 0; it < cam_types.size(); it++) { + if (cam_types[it] == "sci_cam") { + double new_val = atof(FLAGS_nav_cam_to_sci_cam_offset_override_value.c_str()); + std::cout << "Overriding the value " << nav_to_cam_timestamp_offset[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + nav_to_cam_timestamp_offset[it] = new_val; + } + } } - - scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + } else { + // No modeling of timestamp offset is used with simulated data. The + // transforms between the cameras are not needed as we record each + // camera pose. 
+ nav_to_cam_timestamp_offset = std::vector(cam_types.size(), 0); + for (size_t it = 0; it < cam_types.size(); it++) + nav_to_cam_trans.push_back(Eigen::Affine3d::Identity()); + haz_cam_depth_to_image_transform = Eigen::Affine3d::Identity(); // no adjustment needed } boost::shared_ptr sparse_map; if (!FLAGS_simulated_data) { std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; - sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + sparse_map = boost::shared_ptr + (new sparse_mapping::SparseMap(FLAGS_sparse_map)); if (!FLAGS_use_brisk_map && (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; @@ -1646,7 +1559,8 @@ int main(int argc, char** argv) { // ("true" or "false"). Here we ensure that the same flag for histogram // equalization is used in the map and for localization. std::string histogram_equalization; - if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { + if (!FLAGS_simulated_data && + gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " @@ -1661,80 +1575,58 @@ int main(int argc, char** argv) { while (ifs >> val) sci_cam_timestamps.insert(val); } + // Create output directories dense_map::createDir(FLAGS_output_dir); - - // Store here the sci cam and nav cam images whose poses we save - std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; - std::string haz_cam_dir = FLAGS_output_dir + "/distorted_haz_cam"; - std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; - dense_map::createDir(nav_cam_dir); - dense_map::createDir(haz_cam_dir); - 
dense_map::createDir(sci_cam_dir); - - // Save haz cam poses and haz cam clouds - if (FLAGS_external_mesh == "") { - std::string cam_type = "haz_cam"; - // haz to nav offset - double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, haz_cam_dir, - FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, - sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, - sim_haz_cam_poses, FLAGS_external_mesh); + std::map cam_dirs; + for (size_t it = 0; it < cam_types.size(); it++) { + cam_dirs[cam_types[it]] = FLAGS_output_dir + "/distorted_" + cam_types[it]; + dense_map::createDir(cam_dirs[cam_types[it]]); } - // Save sci cam poses and sci cam images - if (FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { - std::string cam_type = "sci_cam"; - // sci to nav offset - double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, 
FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, - FLAGS_external_mesh); - - // With simulated data we won't perform an additional undistortion - // step which would save the undistorted intrinsics. Hence need to - // do that here. - if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); - } - // Save nav cam poses and nav cam images. This does not work for simulated data. - if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { - if (FLAGS_use_brisk_map) { - // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. - std::string cam_type = "nav_cam"; - double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself - dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, - FLAGS_external_mesh); - } else { - // Take a shortcut, since the sparse map is made of precisely of the images we - // want to texture, just save those images and their poses from the map, avoiding - // localization - save_navcam_poses_and_images(sparse_map, 
nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); + // The nav cam msgs may not exist for simulated data + std::vector empty_nav_cam_msgs; + std::vector * nav_cam_msgs_ptr; + if (FLAGS_simulated_data) + nav_cam_msgs_ptr = &empty_nav_cam_msgs; + else + nav_cam_msgs_ptr = &bag_handles["nav_cam"]->bag_msgs; + + // Save camera poses and other data for all desired cameras + for (size_t it = 0; it < cam_types.size(); it++) { + std::string cam_type = cam_types[it]; + + if (cam_type == "nav_cam" && FLAGS_simulated_data) { + std::cout << "The nav_cam is not supported with simulated data.\n"; + continue; } + + if (cam_type == "nav_cam" && !FLAGS_use_brisk_map && !FLAGS_simulated_data) { + // Take a shortcut, since the sparse map is made of precisely of + // the images we want to texture. Just save those images and + // their poses from the map, avoiding localization. + save_nav_cam_poses_and_images(sparse_map, + *nav_cam_msgs_ptr, + FLAGS_output_dir, cam_dirs["nav_cam"]); + continue; + } + + saveCameraPoses(FLAGS_simulated_data, cam_type, nav_to_cam_timestamp_offset[it], // NOLINT + nav_to_cam_trans[it], haz_cam_depth_to_image_transform, // NOLINT + FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, // NOLINT + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, // NOLINT + FLAGS_reliability_weight_exponent, median_filter_params, // NOLINT + bag_handles[cam_type]->bag_msgs, // desired cam msgs // NOLINT + *nav_cam_msgs_ptr, // nav msgs // NOLINT + haz_cam_points_handle.bag_msgs, nav_cam_bag_timestamps, // NOLINT + sci_cam_exif, FLAGS_output_dir, // NOLINT + cam_dirs[cam_type], FLAGS_start, FLAGS_duration, // NOLINT + FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, // NOLINT + sci_cam_timestamps, FLAGS_max_iso_times_exposure, // NOLINT + sparse_map, FLAGS_use_brisk_map, // NOLINT + do_haz_cam_image, sim_cam_poses[it], FLAGS_save_debug_data); // NOLINT + + if (FLAGS_simulated_data) + saveSimCamIntrinsics(FLAGS_ros_bag, cam_type, 
cam_dirs[cam_type]); } return 0; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 018ed92c..3d9b62b2 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -21,7 +21,6 @@ """ A wrapper around the tools that when run together produce a textured mesh. """ - import argparse import os import re @@ -37,7 +36,7 @@ def process_args(args): Set up the parser and parse the args. """ - # Extract some paths before the args are parsed + # Extract some paths before the arguments are parsed src_path = os.path.dirname(args[0]) exec_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(src_path))) @@ -51,7 +50,7 @@ def process_args(args): "--ros_bag", dest="ros_bag", default="", - help="A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.", + help="A ROS bag with recorded image and point cloud data.", ) parser.add_argument( "--sparse_map", @@ -66,35 +65,32 @@ def process_args(args): help="The directory where to write the processed data.", ) parser.add_argument( - "--nav_cam_topic", - dest="nav_cam_topic", - default="/hw/cam_nav", - help="The nav cam topic in the bag file.", + "--camera_types", + dest="camera_types", + default="sci_cam nav_cam haz_cam", + help="Specify the cameras to use for the textures, as a list in quotes.", ) parser.add_argument( - "--haz_cam_points_topic", - dest="haz_cam_points_topic", - default="/hw/depth_haz/points", - help="The haz cam point cloud topic in the bag file.", + "--camera_topics", + dest="camera_topics", + default="/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record /hw/depth_haz/extended/amplitude_int", + help="Specify the bag topics for the cameras to texture (in the same order as in " + + "--camera_types). 
Use a list in quotes.", ) parser.add_argument( - "--haz_cam_intensity_topic", - dest="haz_cam_intensity_topic", - default="/hw/depth_haz/extended/amplitude_int", - help="The haz cam intensity topic in the bag file.", + "--undistorted_crop_wins", + dest="undistorted_crop_wins", + default="sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160", + help="The central region to keep after undistorting an image and " + + "before texturing. For sci_cam the numbers are at 1/4th of the full " + + "resolution and will be adjusted for the actual input image dimensions. " + + "Use a list in quotes.", ) parser.add_argument( - "--sci_cam_topic", - dest="sci_cam_topic", - default="/hw/cam_sci", - help="The sci cam topic in the bag file.", - ) - parser.add_argument( - "--camera_type", - dest="camera_type", - default="all", - help="Specify which cameras to process. By default, process all. " - + "Options: nav_cam, haz_cam, sci_cam.", + "--haz_cam_points_topic", + dest="haz_cam_points_topic", + default="/hw/depth_haz/points", + help="The depth point cloud topic in the bag file.", ) parser.add_argument( "--start", @@ -298,10 +294,10 @@ def process_args(args): + "from depth data in the current bag.", ) parser.add_argument( - "--scicam_to_hazcam_timestamp_offset_override_value", - dest="scicam_to_hazcam_timestamp_offset_override_value", + "--nav_cam_to_sci_cam_offset_override_value", + dest="nav_cam_to_sci_cam_offset_override_value", default="", - help="Override the value of scicam_to_hazcam_timestamp_offset " + help="Override the value of nav_cam_to_sci_cam_timestamp_offset " + "from the robot config file with this value.", ) parser.add_argument( @@ -318,10 +314,22 @@ def process_args(args): ) args = parser.parse_args() - return (src_path, exec_path, args) + # Parse the crop windows + crop_wins = args.undistorted_crop_wins.split() + crop_win_map = {} + for crop_win in crop_wins: + vals = crop_win.split(",") + if len(vals) != 3: + raise Exception( + "Invalid value for 
--undistorted_crop_wins: " + + args.undistorted_crop_wins + ) + crop_win_map[vals[0]] = vals[1:] + + return (src_path, exec_path, crop_win_map, args) -def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): +def sanity_checks(geometry_mapper_path, batch_tsdf_path, crop_win_map, args): # Check if the environment was set for var in [ @@ -345,20 +353,43 @@ def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): + "Specify it via --astrobee_build_dir." ) - if args.camera_type == "nav_cam" and args.simulated_data: - print( - "Cannot apply nav cam texture with simulated cameras " - + "as the gazebo distorted images are not accurate enough." - ) - sys.exit(1) - - if args.camera_type == "haz_cam" and args.simulated_data: - print("Texturing haz cam with simulated data was not tested.") - sys.exit(1) + camera_types = args.camera_types.split() if args.output_dir == "": raise Exception("The path to the output directory was not specified.") + if len(camera_types) != len(args.camera_topics.split()): + raise Exception("There must be as many camera types as camera topics.") + + if (not args.simulated_data) and len(camera_types) != len( + args.undistorted_crop_wins.split() + ): + raise Exception( + "There must be as many camera types as listed undistorted " + + "crop windows." + ) + + if args.simulated_data and "nav_cam" in camera_types: + raise Exception( + "The geometry mapper does not support nav_cam with simulated data as " + + "its distortion is not modeled." + ) + + if args.simulated_data and "haz_cam" not in camera_types: + raise Exception( + "The haz_cam must be one of the camera types in simulation mode " + + "as it is needed to read the simulated camera pose in order to " + + "process the depth clouds." 
+ ) + + if not args.simulated_data: + for cam in camera_types: + if not (cam in crop_win_map): + raise Exception( + "No crop win specified in --undistorted_crop_wins for camera: " + + cam + ) + def mkdir_p(path): if path == "": @@ -378,13 +409,26 @@ def setup_outputs(args): mkdir_p(args.output_dir) +def format_cmd(cmd): + """If some command arguments have spaces, quote them. Then concatenate the results.""" + ans = "" + for val in cmd: + if " " in val or "\t" in cmd: + val = '"' + val + '"' + ans += val + " " + return ans + + def run_cmd(cmd, log_file, verbose=False): """ Run a command and write the output to a file. In verbose mode also print to screen. """ - print(" ".join(cmd) + "\n") + + cmd_str = format_cmd(cmd) + print(cmd_str + "\n") + with open(log_file, "w", buffering=0) as f: # replace 'w' with 'wb' for Python 3 - f.write(" ".join(cmd) + "\n") + f.write(cmd_str + "\n") process = subprocess.Popen(cmd, stdout=subprocess.PIPE) for line in iter( process.stdout.readline, "" @@ -409,20 +453,14 @@ def compute_poses_and_clouds(geometry_mapper_path, args): geometry_mapper_path, "--ros_bag", args.ros_bag, - "--sparse_map", - args.sparse_map, "--output_dir", args.output_dir, - "--nav_cam_topic", - args.nav_cam_topic, + "--camera_topics", + args.camera_topics, "--haz_cam_points_topic", args.haz_cam_points_topic, - "--haz_cam_intensity_topic", - args.haz_cam_intensity_topic, - "--sci_cam_topic", - args.sci_cam_topic, - "--camera_type", - args.camera_type, + "--camera_types", + args.camera_types, "--start", args.start, "--duration", @@ -455,17 +493,16 @@ def compute_poses_and_clouds(geometry_mapper_path, args): if args.simulated_data: cmd += ["--simulated_data"] + else: + cmd += ["--sparse_map", args.sparse_map] if args.save_debug_data: cmd += ["--save_debug_data"] - if args.external_mesh != "": - cmd += ["--external_mesh", args.external_mesh] - - if args.scicam_to_hazcam_timestamp_offset_override_value != "": + if args.nav_cam_to_sci_cam_offset_override_value != "": 
cmd += [ - "--scicam_to_hazcam_timestamp_offset_override_value", - args.scicam_to_hazcam_timestamp_offset_override_value, + "--nav_cam_to_sci_cam_offset_override_value", + args.nav_cam_to_sci_cam_offset_override_value, ] log_file = os.path.join(args.output_dir, "geometry_mapper_log.txt") @@ -598,7 +635,7 @@ def undistort_images( cmd = [ undistort_image_path, "--undistorted_crop_win", - undist_crop_win, + str(undist_crop_win[0]) + " " + str(undist_crop_win[1]), "--save_bgr", "--robot_camera", cam_type, @@ -706,7 +743,11 @@ def find_sci_cam_scale(image_file): return float(image_width) / float(config_width) -def texture_mesh(src_path, cam_type, mesh, args): +def texture_mesh(src_path, cam_type, crop_win_map, mesh, args): + if args.simulated_data and cam_type == "nav_cam": + print("Texturing nav_cam is not supported with simulated data.") + return "None" + dist_image_list = os.path.join(args.output_dir, cam_type + "_index.txt") with open(dist_image_list) as f: image_files = f.readlines() @@ -721,23 +762,17 @@ def texture_mesh(src_path, cam_type, mesh, args): if not args.simulated_data: - scale = 1.0 - undist_crop_win = "" - - if cam_type == "nav_cam": - scale = 1.0 - undist_crop_win = "1100 776" - elif cam_type == "haz_cam": + if cam_type == "sci_cam": + # The sci cam needs special treatment + scale = find_sci_cam_scale(image_files[0].rstrip()) + else: scale = 1.0 - undist_crop_win = "210 160" - elif cam_type == "sci_cam": - # Deal with the fact that the robot config file may assume - # a different resolution than what is in the bag - scale = find_sci_cam_scale(image_files[0].rstrip()) - crop_wd = 2 * int(round(625.0 * scale)) # an even number - crop_ht = 2 * int(round(500.0 * scale)) # an even number - undist_crop_win = str(crop_wd) + " " + str(crop_ht) + # Make the crop win even, it is just easier that way + undist_crop_win = [ + 2 * int(round(float(crop_win_map[cam_type][0]) * scale / 2.0)), + 2 * int(round(float(crop_win_map[cam_type][1]) * scale / 2.0)), + ] 
undistort_images( args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale @@ -826,14 +861,14 @@ def merge_poses_and_clouds(args): if __name__ == "__main__": - (src_path, exec_path, args) = process_args(sys.argv) + (src_path, exec_path, crop_win_map, args) = process_args(sys.argv) geometry_mapper_path = os.path.join(exec_path, "geometry_mapper") batch_tsdf_path = os.path.join( os.environ["HOME"], "catkin_ws/devel/lib/voxblox_ros/batch_tsdf" ) - sanity_checks(geometry_mapper_path, batch_tsdf_path, args) + sanity_checks(geometry_mapper_path, batch_tsdf_path, crop_win_map, args) setup_outputs(args) @@ -852,70 +887,62 @@ def merge_poses_and_clouds(args): # Smothing must happen before hole-filling, to remove weird # artifacts smooth_mesh = os.path.join(args.output_dir, "smooth_mesh.ply") - if start_step <= 2 and args.external_mesh == "": + if start_step <= 2 and args.external_mesh == "" and (not args.simulated_data): attempt = 1 smoothe_mesh(fused_mesh, smooth_mesh, args, attempt) # Fill holes hole_filled_mesh = os.path.join(args.output_dir, "hole_filled_mesh.ply") - if start_step <= 3 and args.external_mesh == "": + if start_step <= 3 and args.external_mesh == "" and (not args.simulated_data): attempt = 1 fill_holes_in_mesh(smooth_mesh, hole_filled_mesh, args, attempt) # Rm small connected components clean_mesh = os.path.join(args.output_dir, "clean_mesh.ply") - if start_step <= 4 and args.external_mesh == "": + if start_step <= 4 and args.external_mesh == "" and (not args.simulated_data): rm_connected_components(hole_filled_mesh, clean_mesh, args) # Smoothe again smooth_mesh2 = os.path.join(args.output_dir, "smooth_mesh2.ply") - if start_step <= 5 and args.external_mesh == "": + if start_step <= 5 and args.external_mesh == "" and (not args.simulated_data): attempt = 2 smoothe_mesh(clean_mesh, smooth_mesh2, args, attempt) - # Fill holes again. That is necessary, and should happen after smoothing. 
- # Mesh cleaning creates small holes and they can't be cleaned well - # without some more smoothing like above. + # Fill holes again. That is necessary, and should happen after + # smoothing. Mesh cleaning creates small holes and they can't be + # cleaned well without some more smoothing like above. hole_filled_mesh2 = os.path.join(args.output_dir, "hole_filled_mesh2.ply") - if start_step <= 6 and args.external_mesh == "": + if start_step <= 6 and args.external_mesh == "" and (not args.simulated_data): attempt = 2 fill_holes_in_mesh(smooth_mesh2, hole_filled_mesh2, args, attempt) # Smoothe again as filling holes can make the mesh a little rough smooth_mesh3 = os.path.join(args.output_dir, "smooth_mesh3.ply") - if start_step <= 7 and args.external_mesh == "": + if start_step <= 7 and args.external_mesh == "" and (not args.simulated_data): attempt = 3 smoothe_mesh(hole_filled_mesh2, smooth_mesh3, args, attempt) # Simplify the mesh simplified_mesh = os.path.join(args.output_dir, "simplified_mesh.ply") - if start_step <= 8 and args.external_mesh == "": + if start_step <= 8 and args.external_mesh == "" and (not args.simulated_data): simplify_mesh(smooth_mesh3, simplified_mesh, args) - if args.external_mesh != "": - # So that can texture on top of it + if args.simulated_data: + simplified_mesh = fused_mesh + elif args.external_mesh != "": simplified_mesh = args.external_mesh - nav_textured_mesh = "" - haz_textured_mesh = "" - sci_textured_mesh = "" + textured_meshes = [] if start_step <= 9: - # For simulated data texturing nav cam images is not supported - if (not args.simulated_data) and ( - args.camera_type == "all" or args.camera_type == "nav_cam" - ): - nav_textured_mesh = texture_mesh(src_path, "nav_cam", simplified_mesh, args) - - # For simulated data texturing haz cam images is not supported - if (not args.simulated_data) and ( - args.camera_type == "all" or args.camera_type == "haz_cam" - ): - haz_textured_mesh = texture_mesh(src_path, "haz_cam", 
simplified_mesh, args) - - if args.camera_type == "all" or args.camera_type == "sci_cam": - sci_textured_mesh = texture_mesh(src_path, "sci_cam", simplified_mesh, args) - - if args.external_mesh == "": + for camera_type in args.camera_types.split(): + textured_mesh = texture_mesh( + src_path, camera_type, crop_win_map, simplified_mesh, args + ) + textured_meshes += [textured_mesh] + + if args.simulated_data: + print("Fused mesh: " + fused_mesh) + elif args.external_mesh == "": print("Fused mesh: " + fused_mesh) print("Smoothed mesh: " + smooth_mesh) print("Hole-filled mesh: " + hole_filled_mesh) @@ -927,11 +954,7 @@ def merge_poses_and_clouds(args): else: print("External mesh: " + args.external_mesh) - if nav_textured_mesh != "": - print("Nav cam textured mesh: " + nav_textured_mesh) - - if haz_textured_mesh != "": - print("Haz cam textured mesh: " + haz_textured_mesh) - - if sci_textured_mesh != "": - print("Sci cam textured mesh: " + sci_textured_mesh) + count = 0 + for camera_type in args.camera_types.split(): + print(camera_type + " textured mesh: " + textured_meshes[count]) + count += 1 diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index d8e14135..6e010ed0 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ b/dense_map/geometry_mapper/tools/image_picker.cc @@ -60,7 +60,8 @@ DEFINE_string(ros_bag, "", DEFINE_string(output_nav_cam_dir, "", "The path to a file saving the nav cam image list."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(nav_cam_topic, "/mgt/img_sampler/nav_cam/image_record", + "The nav cam topic in the bag file."); DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); @@ -77,14 +78,13 @@ DEFINE_double(bracket_len, 2.0, "whose distance in time is no more than this (in seconds). 
It is assumed " "the robot moves slowly and uniformly during this time."); -DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); - if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; if (FLAGS_output_nav_cam_dir.empty()) LOG(FATAL) << "The output nav cam dir was not specified."; @@ -105,49 +105,49 @@ int main(int argc, char** argv) { dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); - // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - 
"hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); + std::vector cam_types = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector cam_params; + std::vector nav_to_cam_trans; + std::vector nav_to_cam_timestamp_offset; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_types, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offset, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_types.size(); it++) { + if (cam_types[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << nav_to_cam_timestamp_offset[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + nav_to_cam_timestamp_offset[it] = new_val; + } + } + } // This will be used for output std::map haz_depth_to_image_timestamps, haz_image_to_depth_timestamps; std::vector all_nav_cam_timestamps, all_haz_cam_timestamps, all_sci_cam_timestamps; - double navcam_to_scicam_timestamp_offset - = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; + // Here we count on the order of cam_types above + double nav_cam_to_haz_cam_timestamp_offset = nav_to_cam_timestamp_offset[1]; + double nav_cam_to_sci_cam_timestamp_offset = nav_to_cam_timestamp_offset[2]; - if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding 
scicam_to_hazcam_timestamp_offset.\n"; - scicam_to_hazcam_timestamp_offset = new_val; - } - - std::cout << "navcam_to_hazcam_timestamp_offset = " << navcam_to_hazcam_timestamp_offset << std::endl; - std::cout << "scicam_to_hazcam_timestamp_offset = " << scicam_to_hazcam_timestamp_offset << std::endl; + std::cout << "nav_cam_to_haz_cam_timestamp_offset = " << nav_cam_to_haz_cam_timestamp_offset << "\n"; + std::cout << "nav_cam_to_sci_cam_timestamp_offset = " << nav_cam_to_sci_cam_timestamp_offset << "\n"; - // TODO(oalexan1): Make these into functions + // TODO(oalexan1): Make the logic below into functions // Read the haz cam data we will need double haz_cam_start_time = -1.0; std::vector const& haz_cam_intensity_msgs = haz_cam_intensity_handle.bag_msgs; for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { - sensor_msgs::Image::ConstPtr image_msg = haz_cam_intensity_msgs[it].instantiate(); + sensor_msgs::Image::ConstPtr image_msg + = haz_cam_intensity_msgs[it].instantiate(); if (image_msg) { double haz_cam_time = image_msg->header.stamp.toSec(); all_haz_cam_timestamps.push_back(haz_cam_time); @@ -232,8 +232,8 @@ int main(int argc, char** argv) { for (int sci_it = 0; sci_it < num_sci; sci_it++) { for (int nav_it1 = nav_cam_pos; nav_it1 < num_nav; nav_it1++) { // Adjust for timestamp offsets - double t1 = all_nav_cam_timestamps[nav_it1] + navcam_to_hazcam_timestamp_offset; - double s = all_sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; + double t1 = all_nav_cam_timestamps[nav_it1]; + double s = all_sci_cam_timestamps[sci_it] - nav_cam_to_sci_cam_timestamp_offset; bool is_good1 = (t1 >= s - d/2.0 && t1 <= s); @@ -243,8 +243,7 @@ int main(int argc, char** argv) { // Now get the right bracket for (size_t nav_it2 = num_nav - 1; nav_it2 >= 0; nav_it2--) { - // Adjust for timestamp offsets - double t2 = all_nav_cam_timestamps[nav_it2] + navcam_to_hazcam_timestamp_offset; + double t2 = all_nav_cam_timestamps[nav_it2]; bool is_good2 = (s < t2 && 
t2 <= s + d/2.0); if (is_good2) { @@ -281,6 +280,5 @@ int main(int argc, char** argv) { std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, image); } - return 0; } diff --git a/dense_map/geometry_mapper/tools/process_bag.cc b/dense_map/geometry_mapper/tools/scale_bag.cc similarity index 100% rename from dense_map/geometry_mapper/tools/process_bag.cc rename to dense_map/geometry_mapper/tools/scale_bag.cc diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index fe88c933..5ec9ec61 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -77,9 +76,12 @@ class StreamingMapper { void ReadParams(ros::NodeHandle const& nh); void publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - double max_iso_times_exposure, double iso, double exposure, int processed_camera_count, - cv::Mat const& image, double image_timestamp, camera::CameraModel const& cam, - std::vector& smallest_cost_per_face, std::string const& out_prefix); + double max_iso_times_exposure, double iso, double exposure, + int processed_camera_count, + cv::Mat const& image, double image_timestamp, + camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, + std::string const& out_prefix); void TextureCamSimPoseCallback(const geometry_msgs::PoseStamped::ConstPtr& pose); void TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& info); @@ -109,24 +111,25 @@ class StreamingMapper { config_reader::ConfigReader config_params; camera::CameraParameters m_texture_cam_params; - sensor_msgs::Image texture_obj_msg, texture_mtl_msg; + sensor_msgs::Image m_texture_obj_msg, m_texture_mtl_msg; - double navcam_to_texture_cam_timestamp_offset; - Eigen::MatrixXd texture_cam_to_navcam_trans; - Eigen::MatrixXd navcam_to_body_trans; + 
double m_navcam_to_texture_cam_timestamp_offset; + Eigen::MatrixXd m_texture_cam_to_navcam_trans; + Eigen::MatrixXd m_navcam_to_body_trans; - std::string nav_cam_pose_topic, ekf_state_topic, ekf_pose_topic, texture_cam_topic, sci_cam_exif_topic, - texture_cam_type, mesh_file; - bool sim_mode, save_to_disk, use_single_texture; + std::string nav_cam_pose_topic, ekf_state_topic, ekf_pose_topic, texture_cam_topic, + sci_cam_exif_topic, m_texture_cam_type, mesh_file; + bool m_sim_mode, save_to_disk, m_sci_cam_exp_corr, use_single_texture; // For meshing mve::TriangleMesh::Ptr mesh; std::shared_ptr mesh_info; - std::shared_ptr graph; // TODO(oalexan1): Is this necessary? + std::shared_ptr graph; std::shared_ptr bvh_tree; // Each callback must have a lock - std::mutex nav_cam_pose_lock, texture_cam_pose_lock, texture_cam_info_lock, texture_cam_image_lock, sci_cam_exif_lock; + std::mutex nav_cam_pose_lock, texture_cam_pose_lock, texture_cam_info_lock, + texture_cam_image_lock, sci_cam_exif_lock; // Data indexed by timestamp std::map nav_cam_localization_poses; @@ -137,9 +140,9 @@ class StreamingMapper { // For processing pictures as the camera moves along double dist_between_processed_cams, max_iso_times_exposure; - Eigen::Vector3d last_processed_cam_ctr; - std::vector smallest_cost_per_face; - int processed_camera_count; + Eigen::Vector3d m_last_processed_cam_ctr; + std::vector m_smallest_cost_per_face; + int m_processed_camera_count; double num_exclude_boundary_pixels; // The info for each face which allows one later to project an image onto @@ -161,11 +164,11 @@ class StreamingMapper { StreamingMapper::StreamingMapper() : m_texture_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)), - navcam_to_texture_cam_timestamp_offset(0.0), - texture_cam_to_navcam_trans(Eigen::MatrixXd::Identity(4, 4)), - navcam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), - last_processed_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), - 
processed_camera_count(0) {} + m_navcam_to_texture_cam_timestamp_offset(0.0), + m_texture_cam_to_navcam_trans(Eigen::MatrixXd::Identity(4, 4)), + m_navcam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), + m_last_processed_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), + m_processed_camera_count(0) {} StreamingMapper::~StreamingMapper(void) { thread->join(); } @@ -176,7 +179,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { // Set the config path to ISAAC char* path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); + if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + ROS_FATAL("Could not find the config path. Ensure that ISAAC_CONFIG_DIR was set."); config_params.SetPath(path); config_params.AddFile("dense_map/streaming_mapper.config"); @@ -185,87 +189,65 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { StreamingMapper::ReadParams(nh); // Read configuration data - if (!sim_mode) { - Eigen::MatrixXd hazcam_to_navcam_trans; - Eigen::MatrixXd scicam_to_hazcam_trans; - Eigen::Affine3d hazcam_depth_to_image_transform; - double navcam_to_hazcam_timestamp_offset, scicam_to_hazcam_timestamp_offset; - - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + if (!m_sim_mode) { + // Read the bot config file + std::vector cam_names = {m_texture_cam_type}; + std::vector cam_params; + std::vector nav_to_cam_trans; + std::vector nav_to_cam_timestamp_offsets; + Eigen::Affine3d hazcam_depth_to_image_transform; + Eigen::Affine3d navcam_to_body_trans; + + dense_map::readConfigFile( // Inputs + cam_names, + "nav_cam_transform", // this is the nav cam to body transform + "haz_cam_depth_to_image_transform", + // Outputs + 
cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offsets, navcam_to_body_trans, + hazcam_depth_to_image_transform); + + m_navcam_to_body_trans = navcam_to_body_trans.matrix(); { // Note the lock, because m_texture_cam_params is a shared resource const std::lock_guard lock(texture_cam_info_lock); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - // Transform to convert from given camera to nav camera coordinates - if (texture_cam_type == "nav_cam") { - texture_cam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - navcam_to_texture_cam_timestamp_offset = 0.0; - m_texture_cam_params = nav_cam_params; - } else if (texture_cam_type == "haz_cam") { - texture_cam_to_navcam_trans = hazcam_to_navcam_trans; - navcam_to_texture_cam_timestamp_offset = navcam_to_hazcam_timestamp_offset; - m_texture_cam_params = haz_cam_params; - } else if (texture_cam_type == "sci_cam") { - texture_cam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - navcam_to_texture_cam_timestamp_offset = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - m_texture_cam_params = sci_cam_params; - } else { - LOG(FATAL) << "Invalid texture cam type: " << texture_cam_type; - } -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << "\n"; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << "\n"; - std::cout << "texture_cam_to_navcam_trans\n" << texture_cam_to_navcam_trans << "\n"; - std::cout << "navcam_to_hazcam_timestamp_offset: " - << navcam_to_hazcam_timestamp_offset - << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: 
" - << scicam_to_hazcam_timestamp_offset - << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" - << hazcam_depth_to_image_transform.matrix() - << "\n"; - - std::cout << "navcam_to_texture_cam_timestamp_offset: " - << navcam_to_texture_cam_timestamp_offset << "\n"; + // Index 0 below, based on the order in cam_names + m_texture_cam_to_navcam_trans = nav_to_cam_trans[0].inverse().matrix(); + m_navcam_to_texture_cam_timestamp_offset = nav_to_cam_timestamp_offsets[0]; + m_texture_cam_params = cam_params[0]; + + std::cout << "m_navcam_to_texture_cam_timestamp_offset: " + << m_navcam_to_texture_cam_timestamp_offset << "\n"; std::cout << "texture cam focal vector: " << m_texture_cam_params.GetFocalVector().transpose() << "\n"; -#endif } } // Set up the publishers - std::string mapper_img_topic = "/ism/" + texture_cam_type + "/img"; - std::string mapper_obj_topic = "/ism/" + texture_cam_type + "/obj"; - std::string mapper_mtl_topic = "/ism/" + texture_cam_type + "/mtl"; + std::string mapper_img_topic = "/ism/" + m_texture_cam_type + "/img"; + std::string mapper_obj_topic = "/ism/" + m_texture_cam_type + "/obj"; + std::string mapper_mtl_topic = "/ism/" + m_texture_cam_type + "/mtl"; ROS_INFO_STREAM("Publishing topics: " << mapper_img_topic << ' ' << mapper_obj_topic << ' ' << mapper_mtl_topic); texture_img_pub = nh.advertise(mapper_img_topic, 1); texture_obj_pub = nh.advertise(mapper_obj_topic, 1); texture_mtl_pub = nh.advertise(mapper_mtl_topic, 1); // Set up the subscribers - if (sim_mode) { + if (m_sim_mode) { // Subscribe to the simulated texture cam image pose and intrinsics. // Keep a bunch of them in the queue. // The name of these topics are TOPIC_HAZ_CAM_SIM_POSE, etc., in ff_names.h. 
- std::string texture_cam_sim_pose_topic = "/sim/" + texture_cam_type + "/pose"; - std::string texture_cam_sim_info_topic = "/sim/" + texture_cam_type + "/info"; - ROS_INFO_STREAM("Subscribing to " << texture_cam_sim_pose_topic); - ROS_INFO_STREAM("Subscribing to " << texture_cam_sim_info_topic); + std::string texture_cam_sim_pose_topic = "/sim/" + m_texture_cam_type + "/pose"; + std::string texture_cam_sim_info_topic = "/sim/" + m_texture_cam_type + "/info"; + ROS_INFO_STREAM("Subscribing to: " << texture_cam_sim_pose_topic); + ROS_INFO_STREAM("Subscribing to: " << texture_cam_sim_info_topic); texture_cam_pose_sub = - nh.subscribe(texture_cam_sim_pose_topic, 10, &StreamingMapper::TextureCamSimPoseCallback, this); + nh.subscribe(texture_cam_sim_pose_topic, 10, + &StreamingMapper::TextureCamSimPoseCallback, this); texture_cam_info_sub = - nh.subscribe(texture_cam_sim_info_topic, 10, &StreamingMapper::TextureCamSimInfoCallback, this); + nh.subscribe(texture_cam_sim_info_topic, 10, + &StreamingMapper::TextureCamSimInfoCallback, this); } else { // Get the nav cam pose topic or the ekf state topic, or the ekf // pose topic, from which the other camera poses are deduced. @@ -281,17 +263,20 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { } } - sci_cam_exif_topic = "/hw/sci_cam_exif"; - ROS_INFO_STREAM("Subscribing to sci cam exif topic: " << sci_cam_exif_topic); - sci_cam_exif_sub = nh.subscribe(sci_cam_exif_topic, 10, &StreamingMapper::SciCamExifCallback, this); + if (m_texture_cam_type == "sci_cam" && !m_sim_mode && m_sci_cam_exp_corr) { + sci_cam_exif_topic = "/hw/sci_cam_exif"; + ROS_INFO_STREAM("Subscribing to sci cam exif topic: " << sci_cam_exif_topic); + sci_cam_exif_sub = nh.subscribe(sci_cam_exif_topic, 10, + &StreamingMapper::SciCamExifCallback, this); + } // Subscribe to images. Keep just 2 in the queue as they can be big. 
image_transport.reset(new image_transport::ImageTransport(nh)); std::string compressed = "/compressed"; if (texture_cam_topic.length() >= compressed.length() && - texture_cam_topic.compare(texture_cam_topic.length() - compressed.length(), compressed.length(), compressed) == - 0) { + texture_cam_topic.compare(texture_cam_topic.length() - compressed.length(), + compressed.length(), compressed) == 0) { // texture_cam_topic ends with the string /compressed ROS_INFO_STREAM("Subscribing to compressed image topic: " << texture_cam_topic); compressed_texture_image_sub = @@ -308,10 +293,11 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { std::vector const& faces = mesh->get_faces(); int num_faces = faces.size(); - smallest_cost_per_face = std::vector(num_faces, 1.0e+100); + m_smallest_cost_per_face = std::vector(num_faces, 1.0e+100); if (use_single_texture) - dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info, texture_atlases, texture_model); + dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info, + texture_atlases, texture_model); } catch (std::exception& e) { LOG(FATAL) << "Could not load mesh.\n" << e.what() << "\n"; } @@ -330,7 +316,7 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { ROS_FATAL("Could not read the ekf_state_topic parameter."); if (!config_params.GetStr("texture_cam_topic", &texture_cam_topic)) ROS_FATAL("Could not read the texture_cam_topic parameter."); - if (!config_params.GetStr("texture_cam_type", &texture_cam_type)) + if (!config_params.GetStr("texture_cam_type", &m_texture_cam_type)) ROS_FATAL("Could not read the texture_cam_type parameter."); if (!config_params.GetStr("mesh_file", &mesh_file)) ROS_FATAL("Could not read the mesh_file parameter."); if (!config_params.GetReal("dist_between_processed_cams", &dist_between_processed_cams)) @@ -345,46 +331,53 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { // The sim_mode parameter is very important, it must be read early 
// on as a lot of logic depends on its value. - nh.getParam("sim_mode", sim_mode); - - if (!config_params.GetBool("save_to_disk", &save_to_disk)) ROS_FATAL("Could not read the save_to_disk parameter."); + nh.getParam("sim_mode", m_sim_mode); if (!config_params.GetBool("use_single_texture", &use_single_texture)) ROS_FATAL("Could not read the use_single_texture parameter."); - ROS_INFO_STREAM("Texture camera type = " << texture_cam_type); + if (!config_params.GetBool("sci_cam_exposure_correction", &m_sci_cam_exp_corr)) + ROS_FATAL("Could not read the sci_cam_exposure_correction parameter."); + + if (!config_params.GetBool("save_to_disk", &save_to_disk)) + ROS_FATAL("Could not read the save_to_disk parameter."); + + ROS_INFO_STREAM("Texture camera type = " << m_texture_cam_type); ROS_INFO_STREAM("Mesh = " << mesh_file); - if (!sim_mode) { + if (!m_sim_mode) { int num = (!nav_cam_pose_topic.empty()) + (!ekf_state_topic.empty()) + (!ekf_pose_topic.empty()); if (num != 1) LOG(FATAL) << "Must specify exactly only one of nav_cam_pose_topic, " << "ekf_state_topic, ekf_pose_topic."; ROS_INFO_STREAM("dist_between_processed_cams = " << dist_between_processed_cams); - ROS_INFO_STREAM("max_iso_times_exposure = " << max_iso_times_exposure); - ROS_INFO_STREAM("use_single_texture = " << use_single_texture); - ROS_INFO_STREAM("pixel_size = " << pixel_size); - ROS_INFO_STREAM("num_threads = " << num_threads); - ROS_INFO_STREAM("sim_mode = " << sim_mode); - ROS_INFO_STREAM("save_to_disk = " << save_to_disk); + ROS_INFO_STREAM("max_iso_times_exposure = " << max_iso_times_exposure); + ROS_INFO_STREAM("use_single_texture = " << use_single_texture); + ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); + ROS_INFO_STREAM("pixel_size = " << pixel_size); + ROS_INFO_STREAM("num_threads = " << num_threads); + ROS_INFO_STREAM("sim_mode = " << m_sim_mode); + ROS_INFO_STREAM("save_to_disk = " << save_to_disk); ROS_INFO_STREAM("num_exclude_boundary_pixels = " << 
num_exclude_boundary_pixels); - ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); + ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); + ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); } - if (texture_cam_type != "nav_cam" && texture_cam_type != "sci_cam" && texture_cam_type != "haz_cam" && - texture_cam_type != "heat_cam" && texture_cam_type != "acoustics_cam") - LOG(FATAL) << "Invalid texture cam type: " << texture_cam_type; - // For a camera like sci_cam, the word "sci" better be part of texture_cam_topic. - std::string cam_name = texture_cam_type.substr(0, 3); + std::string cam_name = m_texture_cam_type.substr(0, 3); if (texture_cam_topic.find(cam_name) == std::string::npos) - LOG(FATAL) << "Texture topic " << texture_cam_topic << " is expected to contain the string " << cam_name; + LOG(FATAL) << "Texture topic " << texture_cam_topic << " is expected to contain the string " + << cam_name; } -void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - double max_iso_times_exposure, double iso, double exposure, - int processed_camera_count, cv::Mat const& image, double image_timestamp, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, +void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, + std::shared_ptr bvh_tree, + double max_iso_times_exposure, + double iso, double exposure, + int processed_camera_count, + cv::Mat const& image, double image_timestamp, + camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, std::string const& out_prefix) { omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(num_threads); // Use this many threads for all consecutive @@ -427,12 +420,14 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: std::map uv_map; if (!use_single_texture) { // Find the UV coordinates and the faces having them - dense_map::projectTexture(mesh, 
bvh_tree, *img_ptr, cam, num_exclude_boundary_pixels, smallest_cost_per_face, + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, num_exclude_boundary_pixels, + smallest_cost_per_face, face_vec, uv_map); msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", *img_ptr).toImageMsg(); } else { - dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, pixel_size, num_threads, + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, + pixel_size, num_threads, face_projection_info, texture_atlases, texture_model, model_texture); // Note that the output image has an alpha channel msg = cv_bridge::CvImage(std_msgs::Header(), "bgra8", model_texture).toImageMsg(); @@ -457,22 +452,23 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: int height = obj_len / width + 1; if (width * height <= obj_len) LOG(FATAL) << "Not enough room allocated for the image."; - texture_obj_msg.header.stamp = ros::Time(image_timestamp); - texture_obj_msg.width = width; - texture_obj_msg.height = height; - texture_obj_msg.step = width; - texture_obj_msg.encoding = "mono8"; - texture_obj_msg.is_bigendian = false; - texture_obj_msg.data.resize(width * height); + m_texture_obj_msg.header.stamp = ros::Time(image_timestamp); + m_texture_obj_msg.width = width; + m_texture_obj_msg.height = height; + m_texture_obj_msg.step = width; + m_texture_obj_msg.encoding = "mono8"; + m_texture_obj_msg.is_bigendian = false; + m_texture_obj_msg.data.resize(width * height); std::copy(reinterpret_cast(&obj_str[0]), // input beg reinterpret_cast(&obj_str[0]) + obj_len, // input end - texture_obj_msg.data.begin()); // destination + m_texture_obj_msg.data.begin()); // destination // Pad with nulls - for (int it = obj_len; it < width * height; it++) texture_obj_msg.data[it] = '\0'; + for (int it = obj_len; it < width * height; it++) m_texture_obj_msg.data[it] = '\0'; // When using a single texture, publish just once - if 
(!use_single_texture || processed_camera_count == 0) texture_obj_pub.publish(texture_obj_msg); + if (!use_single_texture || processed_camera_count == 0) + texture_obj_pub.publish(m_texture_obj_msg); } // Publish the mtl string as a mono image with one row (this hack is temporary) @@ -480,35 +476,36 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: if (!use_single_texture || processed_camera_count == 0 || save_to_disk) { formMtl(out_prefix, mtl_str); int mtl_len = mtl_str.size(); - texture_mtl_msg.header.stamp = ros::Time(image_timestamp); - texture_mtl_msg.width = mtl_len; - texture_mtl_msg.height = 1; - texture_mtl_msg.step = mtl_len; - texture_mtl_msg.encoding = "mono8"; - texture_mtl_msg.is_bigendian = false; - texture_mtl_msg.data.resize(mtl_len); + m_texture_mtl_msg.header.stamp = ros::Time(image_timestamp); + m_texture_mtl_msg.width = mtl_len; + m_texture_mtl_msg.height = 1; + m_texture_mtl_msg.step = mtl_len; + m_texture_mtl_msg.encoding = "mono8"; + m_texture_mtl_msg.is_bigendian = false; + m_texture_mtl_msg.data.resize(mtl_len); std::copy(reinterpret_cast(&mtl_str[0]), // input beg reinterpret_cast(&mtl_str[0]) + mtl_len, // input end - texture_mtl_msg.data.begin()); // destination + m_texture_mtl_msg.data.begin()); // destination - if (!use_single_texture || processed_camera_count == 0) texture_mtl_pub.publish(texture_mtl_msg); + if (!use_single_texture || processed_camera_count == 0) + texture_mtl_pub.publish(m_texture_mtl_msg); } if (save_to_disk) { std::string obj_file = out_prefix + ".obj"; - std::cout << "Writing: " << obj_file << std::endl; + std::cout << "Writing: ~/.ros/" << obj_file << std::endl; std::ofstream obj_handle(obj_file); obj_handle << obj_str; obj_handle.close(); std::string mtl_file = out_prefix + ".mtl"; - std::cout << "Writing: " << mtl_file << std::endl; + std::cout << "Writing: ~/.ros/" << mtl_file << std::endl; std::ofstream mtl_handle(mtl_file); mtl_handle << mtl_str; mtl_handle.close(); 
std::string png_file = out_prefix + ".png"; - std::cout << "Writing: " << png_file << std::endl; + std::cout << "Writing: ~/.ros/" << png_file << std::endl; if (!use_single_texture) cv::imwrite(png_file, *img_ptr); else @@ -519,7 +516,7 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: // Add the latest received texture cam pose to the storage. This is meant to be used // only for simulated data. Else use the landmark and ekf callbacks. void StreamingMapper::TextureCamSimPoseCallback(const geometry_msgs::PoseStamped::ConstPtr& pose) { - if (!sim_mode) return; + if (!m_sim_mode) return; // Add the latest pose. Use a lock. const std::lock_guard lock(texture_cam_pose_lock); @@ -539,7 +536,7 @@ void StreamingMapper::TextureCamSimPoseCallback(const geometry_msgs::PoseStamped } void StreamingMapper::TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& info) { - if (!sim_mode) return; + if (!m_sim_mode) return; // Initialize m_texture_cam_params just once. Use a lock. if (m_texture_cam_params.GetFocalVector() == Eigen::Vector2d(0, 0)) { @@ -580,13 +577,13 @@ void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3 // Must compensate for the fact that the nav cam, haz cam, and texture cam all // have some time delay among them - double texture_cam_timestamp = nav_cam_timestamp + navcam_to_texture_cam_timestamp_offset; + double texture_cam_timestamp = nav_cam_timestamp + m_navcam_to_texture_cam_timestamp_offset; // Keep in mind that nav_cam_pose is the transform from the nav cam // to the world. Hence the matrices are multiplied in the order as // below. Eigen::Affine3d texture_cam_pose; - texture_cam_pose.matrix() = nav_cam_pose.matrix() * texture_cam_to_navcam_trans; + texture_cam_pose.matrix() = nav_cam_pose.matrix() * m_texture_cam_to_navcam_trans; { // Add the latest pose. Use a lock. 
const std::lock_guard lock(texture_cam_pose_lock); @@ -601,7 +598,7 @@ void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3 // This callback takes as input the nav_cam pose as output by localization_node. void StreamingMapper::LandmarkCallback(ff_msgs::VisualLandmarks::ConstPtr const& vl) { - if (sim_mode) return; + if (m_sim_mode) return; double nav_cam_timestamp = vl->header.stamp.toSec(); Eigen::Affine3d nav_cam_pose; @@ -613,7 +610,7 @@ void StreamingMapper::LandmarkCallback(ff_msgs::VisualLandmarks::ConstPtr const& // This callback takes as input the robot pose as output by ekf on // ekf_state_topic (it should be set to /gnc/ekf) void StreamingMapper::EkfStateCallback(ff_msgs::EkfState::ConstPtr const& ekf_state) { - if (sim_mode) return; + if (m_sim_mode) return; double nav_cam_timestamp = ekf_state->header.stamp.toSec(); Eigen::Affine3d body_pose; @@ -623,13 +620,13 @@ void StreamingMapper::EkfStateCallback(ff_msgs::EkfState::ConstPtr const& ekf_st // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as: // navcam_to_world = body_to_world * navcam_to_body Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * navcam_to_body_trans; + nav_cam_pose.matrix() = body_pose.matrix() * m_navcam_to_body_trans; StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); } // This callback takes as input the robot pose as output by ekf on /loc/pose. 
void StreamingMapper::EkfPoseCallback(geometry_msgs::PoseStamped::ConstPtr const& ekf_pose) { - if (sim_mode) return; + if (m_sim_mode) return; double nav_cam_timestamp = ekf_pose->header.stamp.toSec(); Eigen::Affine3d body_pose; @@ -639,13 +636,13 @@ void StreamingMapper::EkfPoseCallback(geometry_msgs::PoseStamped::ConstPtr const // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as: // navcam_to_world = body_to_world * navcam_to_body Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * navcam_to_body_trans; + nav_cam_pose.matrix() = body_pose.matrix() * m_navcam_to_body_trans; StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); } // This callback takes as input the robot pose as output by ekf on /loc/pose. void StreamingMapper::SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr const& exif) { - if (sim_mode) return; + if (m_sim_mode || !m_sci_cam_exp_corr) return; double timestamp = exif->data[dense_map::TIMESTAMP]; @@ -668,17 +665,21 @@ void StreamingMapper::SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr c void StreamingMapper::WipeOldImages() { // Wipe the oldest images. Keep just a few. - size_t num_to_keep = 10; // for sci_cam - if (texture_cam_type == "nav_cam") { - // For small images that get published often keep a bunch of them, - // as sometimes there is a delay for when the pose arrives, - // and if not enough images are kept they are wiped before - // they can be used. - // TODO(oalexan1): This is not robust enough. + // For small images that get published often keep a bunch of them, + // as sometimes there is a delay for when the pose arrives, + // and if not enough images are kept they are wiped before + // they can be used. + // TODO(oalexan1): This is not robust enough. 
+ size_t num_to_keep = 0; + if (m_texture_cam_type == "nav_cam") { num_to_keep = 50; - } - if (texture_cam_type == "haz_cam") { + } else if (m_texture_cam_type == "haz_cam") { + // These are small num_to_keep = 100; + } else if (m_texture_cam_type == "sci_cam") { + num_to_keep = 10; + } else { + num_to_keep = 25; // something in the middle } while (texture_cam_images.size() > num_to_keep) texture_cam_images.erase(texture_cam_images.begin()); @@ -778,7 +779,7 @@ void StreamingMapper::ProcessingLoop() { } // Copy the exif data locally using a lock - bool need_exif = (!sim_mode && texture_cam_type == "sci_cam"); + bool need_exif = (!m_sim_mode && m_texture_cam_type == "sci_cam" && m_sci_cam_exp_corr); std::map > local_sci_cam_exif; if (need_exif) { @@ -817,7 +818,8 @@ void StreamingMapper::ProcessingLoop() { continue; } - if (need_exif && local_sci_cam_exif.find(texture_cam_image_timestamp) == local_sci_cam_exif.end()) { + if (need_exif && + local_sci_cam_exif.find(texture_cam_image_timestamp) == local_sci_cam_exif.end()) { // Skip if the exif info did not arrive yet continue; } @@ -830,7 +832,8 @@ void StreamingMapper::ProcessingLoop() { success = true; // Wipe this image and images older than this as we will never use them - while (texture_cam_images.size() > 0 && texture_cam_images.begin()->first <= texture_cam_image_timestamp) + while (texture_cam_images.size() > 0 && + texture_cam_images.begin()->first <= texture_cam_image_timestamp) texture_cam_images.erase(texture_cam_images.begin()); // Stop given that we have found a good image to process @@ -852,13 +855,14 @@ void StreamingMapper::ProcessingLoop() { // Find the interpolated pose at the current image Eigen::Affine3d curr_texture_cam_pose; - if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_poses, curr_texture_cam_pose)) + if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_poses, + curr_texture_cam_pose)) LOG(FATAL) << "Could not bracket the timestamp. 
Should have worked at this stage."; camera::CameraModel cam(curr_texture_cam_pose.inverse(), local_texture_cam_params); Eigen::Vector3d cam_ctr = cam.GetPosition(); - double dist_to_prev_processed = (last_processed_cam_ctr - cam_ctr).norm(); + double dist_to_prev_processed = (m_last_processed_cam_ctr - cam_ctr).norm(); bool do_process = (std::isnan(dist_to_prev_processed) || // for the first camera to process dist_to_prev_processed > dist_between_processed_cams); @@ -889,16 +893,17 @@ void StreamingMapper::ProcessingLoop() { // Publish the textured mesh in obj format std::ostringstream oss; - oss << "processed_" << 1000 + processed_camera_count; // add 1000 to list them nicely + oss << "processed_" << 1000 + m_processed_camera_count; // add 1000 to list them nicely std::string out_prefix = oss.str(); - publishTexturedMesh(mesh, bvh_tree, max_iso_times_exposure, iso, exposure, processed_camera_count, - texture_cam_image, texture_cam_image_timestamp, cam, smallest_cost_per_face, out_prefix); + publishTexturedMesh(mesh, bvh_tree, max_iso_times_exposure, iso, exposure, + m_processed_camera_count, texture_cam_image, texture_cam_image_timestamp, + cam, m_smallest_cost_per_face, out_prefix); // Save this for next time - last_processed_cam_ctr = cam_ctr; - processed_camera_count++; + m_last_processed_cam_ctr = cam_ctr; + m_processed_camera_count++; - std::cout << "Total processing took " << timer.get_elapsed() / 1000.0 << " seconds\n"; + std::cout << "Texture processing took " << timer.get_elapsed() / 1000.0 << " seconds\n"; } } diff --git a/isaac/config/dense_map/streaming_mapper.config b/isaac/config/dense_map/streaming_mapper.config index 6b78cc63..20d7bee3 100644 --- a/isaac/config/dense_map/streaming_mapper.config +++ b/isaac/config/dense_map/streaming_mapper.config @@ -48,6 +48,7 @@ texture_cam_type = "sci_cam"; texture_cam_topic = "/hw/cam_sci/compressed"; dist_between_processed_cams = 0.10; max_iso_times_exposure = 5.1; +sci_cam_exposure_correction = true; 
use_single_texture = true; pixel_size = 0.001; diff --git a/isaac/resources/rviz/iss.rviz b/isaac/resources/rviz/iss.rviz index 1a0f6cc1..d75f7162 100644 --- a/isaac/resources/rviz/iss.rviz +++ b/isaac/resources/rviz/iss.rviz @@ -579,6 +579,17 @@ Visualization Manager: Transport Hint: raw Unreliable: false Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /hw/depth_haz/extended/amplitude_int + Median window: 5 + Min Value: 0 + Name: "DEBUG: HazCam intensity" + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: @@ -1164,6 +1175,8 @@ Window Geometry: collapsed: false "DEBUG: SciCam": collapsed: false + "DEBUG: HazCam": + collapsed: false Displays: collapsed: false Height: 788 From ef8ad2c02d089d60a6fdcaea0652c6261b7544df Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Mon, 27 Dec 2021 16:09:38 -0800 Subject: [PATCH 16/40] adding soundsee activity plans (#12) --- .../resources/scicam_panorama_geometry.txt | 52 +++++++++++++++++++ .../resources/soundsee_background_noise.txt | 48 +++++++++++++++++ .../resources/soundsee_backround_noise.txt | 2 - .../resources/soundsee_panorama_geometry.txt | 16 ++++++ 4 files changed, 116 insertions(+), 2 deletions(-) create mode 100644 astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_background_noise.txt delete mode 100644 astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt diff --git a/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt b/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt new file mode 100644 index 00000000..5cd5cd2b --- /dev/null +++ b/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt @@ 
-0,0 +1,52 @@ +10.90 -9.00 4.88 0.00 -90.00 0.00 +10.86 -9.00 4.95 0.00 -60.00 0.00 +10.89 -9.08 4.95 0.00 -60.00 36.00 +10.96 -9.14 4.95 0.00 -60.00 72.00 +11.04 -9.14 4.95 0.00 -60.00 108.00 +11.11 -9.08 4.95 0.00 -60.00 144.00 +11.14 -9.00 4.95 0.00 -60.00 180.00 +11.12 -8.92 4.95 0.00 -60.00 -144.01 +11.04 -8.86 4.95 0.00 -60.00 -108.01 +10.96 -8.86 4.95 0.00 -60.00 -72.01 +10.89 -8.92 4.95 0.00 -60.00 -36.01 +10.85 -9.00 5.02 0.00 -30.00 0.00 +10.88 -9.09 5.02 0.00 -30.00 36.00 +10.95 -9.14 5.02 0.00 -30.00 72.00 +11.05 -9.14 5.02 0.00 -30.00 108.00 +11.12 -9.09 5.02 0.00 -30.00 144.00 +11.15 -9.00 5.02 0.00 -30.00 180.00 +11.12 -8.91 5.02 0.00 -30.00 -144.01 +11.05 -8.86 5.02 0.00 -30.00 -108.01 +10.95 -8.86 5.02 0.00 -30.00 -72.01 +10.88 -8.91 5.02 0.00 -30.00 -36.01 +10.88 -9.00 5.10 0.00 0.00 0.00 +10.90 -9.07 5.10 0.00 0.00 36.00 +10.96 -9.11 5.10 0.00 0.00 72.00 +11.04 -9.11 5.10 0.00 0.00 108.00 +11.10 -9.07 5.10 0.00 0.00 144.00 +11.12 -9.00 5.10 0.00 0.00 180.00 +11.10 -8.93 5.10 0.00 -0.00 -144.01 +11.04 -8.89 5.10 0.00 0.00 -108.01 +10.96 -8.89 5.10 0.00 0.00 -72.01 +10.90 -8.93 5.10 0.00 0.00 -36.01 +10.95 -9.00 5.14 0.00 30.00 0.00 +10.96 -9.03 5.14 0.00 30.00 36.00 +10.98 -9.05 5.14 0.00 30.00 72.00 +11.02 -9.05 5.14 0.00 30.00 108.00 +11.04 -9.03 5.14 0.00 30.00 144.00 +11.05 -9.00 5.14 0.00 30.00 180.00 +11.04 -8.97 5.14 0.00 30.00 -144.01 +11.02 -8.95 5.14 0.00 30.00 -108.01 +10.98 -8.95 5.14 0.00 30.00 -72.01 +10.96 -8.97 5.14 0.00 30.00 -36.01 +11.02 -9.00 5.15 0.00 60.00 0.00 +11.02 -8.99 5.15 0.00 60.00 36.00 +11.01 -8.98 5.15 0.00 60.00 72.00 +10.99 -8.98 5.15 0.00 60.00 108.00 +10.98 -8.99 5.15 0.00 60.00 144.00 +10.98 -9.00 5.15 0.00 60.00 180.00 +10.98 -9.01 5.15 0.00 60.00 -144.01 +10.99 -9.02 5.15 0.00 60.00 -108.01 +11.01 -9.02 5.15 0.00 60.00 -72.01 +11.02 -9.01 5.15 0.00 60.00 -36.01 +11.10 -9.00 5.12 0.00 90.00 0.00 diff --git a/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt 
b/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt new file mode 100644 index 00000000..8ce6e8d5 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt @@ -0,0 +1,48 @@ +# Scicam stations +11.019643 -8.20 4.28 0.00 0.00 180.00 +11.019643 -7.70 4.28 0.00 0.00 180.00 +11.019643 -7.70 4.78 0.00 0.00 180.00 +11.019643 -8.20 4.78 0.00 0.00 180.00 +11.019643 -8.20 5.28 0.00 0.00 180.00 +11.019643 -7.70 5.28 0.00 0.00 180.00 +# Soundsee stations +10.38 -8.20 4.08 0.00 0.00 180.00 +10.38 -8.05 4.08 0.00 0.00 180.00 +10.38 -7.90 4.08 0.00 0.00 180.00 +10.38 -7.75 4.08 0.00 0.00 180.00 +10.38 -7.60 4.08 0.00 0.00 180.00 +10.38 -7.60 4.22 0.00 0.00 180.00 +10.38 -7.75 4.22 0.00 0.00 180.00 +10.38 -7.90 4.22 0.00 0.00 180.00 +10.38 -8.05 4.22 0.00 0.00 180.00 +10.38 -8.20 4.22 0.00 0.00 180.00 +10.38 -8.20 4.38 0.00 0.00 180.00 +10.38 -8.05 4.38 0.00 0.00 180.00 +10.38 -7.90 4.38 0.00 0.00 180.00 +10.38 -7.75 4.38 0.00 0.00 180.00 +10.38 -7.60 4.38 0.00 0.00 180.00 +10.38 -7.60 4.52 0.00 0.00 180.00 +10.38 -7.75 4.52 0.00 0.00 180.00 +10.38 -7.90 4.52 0.00 0.00 180.00 +10.38 -8.05 4.52 0.00 0.00 180.00 +10.38 -8.20 4.52 0.00 0.00 180.00 +10.38 -8.20 4.68 0.00 0.00 180.00 +10.38 -8.05 4.68 0.00 0.00 180.00 +10.38 -7.90 4.68 0.00 0.00 180.00 +10.38 -7.75 4.68 0.00 0.00 180.00 +10.38 -7.60 4.68 0.00 0.00 180.00 +10.38 -7.60 4.82 0.00 0.00 180.00 +10.38 -7.75 4.82 0.00 0.00 180.00 +10.38 -7.90 4.82 0.00 0.00 180.00 +10.38 -8.05 4.82 0.00 0.00 180.00 +10.38 -8.20 4.82 0.00 0.00 180.00 +10.38 -8.20 4.98 0.00 0.00 180.00 +10.38 -8.05 4.98 0.00 0.00 180.00 +10.38 -7.90 4.98 0.00 0.00 180.00 +10.38 -7.75 4.98 0.00 0.00 180.00 +10.38 -7.60 4.98 0.00 0.00 180.00 +10.38 -7.60 5.12 0.00 0.00 180.00 +10.38 -7.75 5.12 0.00 0.00 180.00 +10.38 -7.90 5.12 0.00 0.00 180.00 +10.38 -8.05 5.12 0.00 0.00 180.00 +10.38 -8.20 5.12 0.00 0.00 180.00 \ No newline at end of file diff --git 
a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt deleted file mode 100644 index d3dd47a1..00000000 --- a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt +++ /dev/null @@ -1,2 +0,0 @@ -# Panorama -11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt b/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt new file mode 100644 index 00000000..5b307f9c --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt @@ -0,0 +1,16 @@ +11.16 -8.97 5.45 130.89 -48.59 -139.11 +11.16 -8.99 5.44 160.58 -58.53 -163.26 +11.16 -9.01 5.44 -160.58 -58.53 163.26 +11.16 -9.03 5.45 -130.89 -48.59 139.11 +11.13 -8.95 5.40 106.74 -58.53 -109.42 +11.13 -8.98 5.39 134.56 -75.90 -135.44 +11.13 -9.02 5.39 -134.56 -75.90 135.44 +11.13 -9.06 5.40 -106.74 -58.53 109.42 +11.08 -8.93 5.37 73.26 -58.53 -70.57 +11.08 -8.98 5.35 45.43 -75.89 -44.56 +11.08 -9.03 5.35 -45.43 -75.89 44.56 +11.08 -9.08 5.37 -73.26 -58.53 70.57 +11.03 -8.92 5.35 49.11 -48.59 -40.89 +11.03 -8.97 5.33 19.42 -58.53 -16.74 +11.03 -9.03 5.34 -19.42 -58.53 16.74 +11.03 -9.09 5.36 -49.11 -48.59 40.89 From d18a9b0c9a200213795fd13bf3d0f036ad268a20 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Wed, 29 Dec 2021 11:41:45 -0800 Subject: [PATCH 17/40] add libproj-dev to the install scripts --- scripts/setup/packages_bionic.lst | 3 ++- scripts/setup/packages_focal.lst | 3 ++- scripts/setup/packages_xenial.lst | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/setup/packages_bionic.lst b/scripts/setup/packages_bionic.lst index 8cdb6fa7..1ab3ee0b 100644 --- a/scripts/setup/packages_bionic.lst +++ b/scripts/setup/packages_bionic.lst @@ -2,4 +2,5 @@ doxygen python-catkin-tools ros-melodic-eigen-conversions ros-melodic-pcl-ros -libmnl-dev \ No newline at end of file 
+libmnl-dev +libproj-dev diff --git a/scripts/setup/packages_focal.lst b/scripts/setup/packages_focal.lst index df2c7115..87954901 100644 --- a/scripts/setup/packages_focal.lst +++ b/scripts/setup/packages_focal.lst @@ -4,4 +4,5 @@ python3-osrf-pycommon python3-rosdep ros-noetic-eigen-conversions ros-noetic-pcl-ros -libmnl-dev \ No newline at end of file +libmnl-dev +libproj-dev diff --git a/scripts/setup/packages_xenial.lst b/scripts/setup/packages_xenial.lst index 5e3e6ffb..2128f89a 100644 --- a/scripts/setup/packages_xenial.lst +++ b/scripts/setup/packages_xenial.lst @@ -2,4 +2,5 @@ doxygen python-catkin-tools ros-kinetic-eigen-conversions ros-kinetic-pcl-ros -libmnl-dev \ No newline at end of file +libmnl-dev +libproj-dev From fbc91b5223cc105c1c53e7f24eb8b2d338d18d6c Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 29 Dec 2021 17:17:05 -0800 Subject: [PATCH 18/40] update sci_cam tool for new apk (#13) * update sci_cam tool for new apk * Updated sci cam image commands * fixes while testing Co-authored-by: Katie Browne --- .../inspection/tools/sci_cam_tool.cc | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/astrobee/behaviors/inspection/tools/sci_cam_tool.cc b/astrobee/behaviors/inspection/tools/sci_cam_tool.cc index 28515bb8..0e5336b2 100644 --- a/astrobee/behaviors/inspection/tools/sci_cam_tool.cc +++ b/astrobee/behaviors/inspection/tools/sci_cam_tool.cc @@ -60,7 +60,7 @@ void publish_cmd(ros::NodeHandle & nh, ros::Publisher & cmd_pub, arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; arg.s = "{\"name\": \"" + cmdName + "\""; if (cmdVal != "") - arg.s += ", \"value\": \"" + cmdVal + "\""; + arg.s += ", " + cmdVal; arg.s += "}"; cmd_args.push_back(arg); @@ -86,15 +86,15 @@ int main(int argc, char *argv[]) { // Accepted commands std::vector cmd_names, cmd_vals; - cmd_names.push_back("takeSinglePicture"); cmd_vals.push_back(""); - 
cmd_names.push_back("turnOnContinuousPictureTaking"); cmd_vals.push_back(""); - cmd_names.push_back("turnOffContinuousPictureTaking"); cmd_vals.push_back(""); - cmd_names.push_back("turnOnSavingPicturesToDisk"); cmd_vals.push_back(""); - cmd_names.push_back("turnOffSavingPicturesToDisk"); cmd_vals.push_back(""); - cmd_names.push_back("setPreviewImageWidth"); cmd_vals.push_back("640"); - cmd_names.push_back("setPreviewImageType"); cmd_vals.push_back("color"); - cmd_names.push_back("setFocusDistance"); cmd_vals.push_back("0.39"); - cmd_names.push_back("setFocusMode"); cmd_vals.push_back("manual"); + cmd_names.push_back("takePicture"); cmd_vals.push_back(""); + cmd_names.push_back("setAutoExposure"); cmd_vals.push_back("\"auto\": true"); + cmd_names.push_back("setContinuousPictureTaking"); cmd_vals.push_back("\"continuous\": true"); + cmd_names.push_back("setFocusDistance"); cmd_vals.push_back("\"distance\": 0.39"); + cmd_names.push_back("setFocusMode"); cmd_vals.push_back("\"mode\": \"manual\""); + cmd_names.push_back("setPublishImage"); cmd_vals.push_back("\"publish\": true"); + cmd_names.push_back("setPublishedImageSize"); cmd_vals.push_back("\"width\": 640, \"height\": 480"); + cmd_names.push_back("setPublishedImageType"); cmd_vals.push_back("\"type\": \"color\""); + cmd_names.push_back("setSavePicturesToDisk"); cmd_vals.push_back("\"save\": true"); // Initialize a ros node ros::init(argc, argv, "sci_cam_tool", ros::init_options::AnonymousName); From c038896853a1e370f85532b000fcab07db409799 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Fri, 31 Dec 2021 10:27:32 -0800 Subject: [PATCH 19/40] fix install link (#17) --- INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index d51722b7..5a7788cd 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -33,7 +33,7 @@ At this point you need to decide where you'd like to put the ISAAC workspace and First, clone the flight 
software repository: - git clone --recursive ssh://git@babelfish.arc.nasa.gov:7999/isaac/isaac.git \ + git clone --recursive https://github.com/nasa/isaac.git \ --branch develop $ISAAC_WS/src/ Checkout the submodule: From 110eb8e4e6ecb4f970008e98c48d4c13ef59c7ec Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Tue, 4 Jan 2022 09:52:20 -0800 Subject: [PATCH 20/40] Add custom tf framestore (#15) * adding isaac tf config * adding soundsee transform from official data * adding custom isaac config path to be general * using framestore nodelet --- astrobee/behaviors/cargo/src/cargo_node.cc | 2 +- .../inspection/src/inspection_node.cc | 2 +- astrobee/hardware/wifi/src/wifi_node.cc | 2 +- astrobee/hardware/wifi/tools/wifi_tool.cc | 2 +- .../geometry_mapper/tools/streaming_mapper.cc | 4 +-- .../tools/RFID_mapper_node.cc | 2 +- .../tools/air_quality_mapper_node.cc | 2 +- .../tools/wifi_mapper_node.cc | 2 +- img_analysis/src/img_analysis_nodelet.cc | 2 +- img_analysis/src/img_vent.cc | 2 +- img_analysis/test/test_vent.test | 4 +-- .../config/dense_map/streaming_mapper.config | 6 ++-- isaac/config/transforms.config | 35 +++++++++++++++++++ isaac/launch/isaac_astrobee.launch | 9 +++++ isaac/launch/robot/ILP.launch | 12 +++++++ 15 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 isaac/config/transforms.config diff --git a/astrobee/behaviors/cargo/src/cargo_node.cc b/astrobee/behaviors/cargo/src/cargo_node.cc index 960f608d..20e32568 100755 --- a/astrobee/behaviors/cargo/src/cargo_node.cc +++ b/astrobee/behaviors/cargo/src/cargo_node.cc @@ -321,7 +321,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { void Initialize(ros::NodeHandle* nh) { nh_ = nh; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader diff --git 
a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 92bd993b..2b23169e 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -247,7 +247,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { void Initialize(ros::NodeHandle* nh) { nh_ = nh; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader diff --git a/astrobee/hardware/wifi/src/wifi_node.cc b/astrobee/hardware/wifi/src/wifi_node.cc index c84b41a8..fb1ff813 100644 --- a/astrobee/hardware/wifi/src/wifi_node.cc +++ b/astrobee/hardware/wifi/src/wifi_node.cc @@ -53,7 +53,7 @@ class WifiNode : public ff_util::FreeFlyerNodelet { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("hw/wifi.config"); diff --git a/astrobee/hardware/wifi/tools/wifi_tool.cc b/astrobee/hardware/wifi/tools/wifi_tool.cc index f15cf2ea..5928591c 100644 --- a/astrobee/hardware/wifi/tools/wifi_tool.cc +++ b/astrobee/hardware/wifi/tools/wifi_tool.cc @@ -56,7 +56,7 @@ class WifiTool { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("hw/wifi.config"); diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index 5ec9ec61..c20e5ee1 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ 
-179,8 +179,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { // Set the config path to ISAAC char* path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) - ROS_FATAL("Could not find the config path. Ensure that ISAAC_CONFIG_DIR was set."); + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) + ROS_FATAL("Could not find the config path. Ensure that CUSTOM_CONFIG_DIR was set."); config_params.SetPath(path); config_params.AddFile("dense_map/streaming_mapper.config"); diff --git a/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc b/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc index 614363eb..b23ddf78 100755 --- a/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc @@ -44,7 +44,7 @@ class RFIDMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc b/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc index 3ccd2fff..6cb3c9ea 100755 --- a/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc @@ -44,7 +44,7 @@ class AirQualityMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc b/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc index 14e1367d..b0b5602b 100755 --- a/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc @@ -43,7 +43,7 @@ class WifiMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = 
getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/img_analysis/src/img_analysis_nodelet.cc b/img_analysis/src/img_analysis_nodelet.cc index 5add9075..bb120467 100755 --- a/img_analysis/src/img_analysis_nodelet.cc +++ b/img_analysis/src/img_analysis_nodelet.cc @@ -76,7 +76,7 @@ class ImageAnalysisNode : public ff_util::FreeFlyerNodelet { protected: void Initialize(ros::NodeHandle* nh) { // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader diff --git a/img_analysis/src/img_vent.cc b/img_analysis/src/img_vent.cc index 2b225253..c2a7e99c 100644 --- a/img_analysis/src/img_vent.cc +++ b/img_analysis/src/img_vent.cc @@ -26,7 +26,7 @@ namespace img_analysis { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("anomaly/img_vent.config"); diff --git a/img_analysis/test/test_vent.test b/img_analysis/test/test_vent.test index 3075f30f..b2297b90 100644 --- a/img_analysis/test/test_vent.test +++ b/img_analysis/test/test_vent.test @@ -21,8 +21,8 @@ - + diff --git a/isaac/config/dense_map/streaming_mapper.config b/isaac/config/dense_map/streaming_mapper.config index 20d7bee3..34421c28 100644 --- a/isaac/config/dense_map/streaming_mapper.config +++ b/isaac/config/dense_map/streaming_mapper.config @@ -16,10 +16,10 @@ -- License for the specific language governing permissions and limitations -- under the License. 
--- Check that we have ISAAC_RESOURCE_DIR set -resource_dir = os.getenv("ISAAC_RESOURCE_DIR") +-- Check that we have CUSTOM_RESOURCE_DIR set +resource_dir = os.getenv("CUSTOM_RESOURCE_DIR") if resource_dir == nil then - io.stderr:write("ISAAC_RESOURCE_DIR not set, aborting.\n") + io.stderr:write("CUSTOM_RESOURCE_DIR not set, aborting.\n") os.exit() end diff --git a/isaac/config/transforms.config b/isaac/config/transforms.config new file mode 100644 index 00000000..ad7e015d --- /dev/null +++ b/isaac/config/transforms.config @@ -0,0 +1,35 @@ +-- Copyright (c) 2017, United States Government, as represented by the +-- Administrator of the National Aeronautics and Space Administration. +-- +-- All rights reserved. +-- +-- The Astrobee platform is licensed under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with the +-- License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +-- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +-- License for the specific language governing permissions and limitations +-- under the License. 
+ +-- All static transforms in our system +transforms = { + + ------------ + -- LOCALS -- + ------------ + + -- SOUNDSEE CAMERA + + { global = false, parent = "body", child = "soundsee", + transform = transform(vec3(0.131585, 0.00185, 0.1068), quat4(0.500, -0.500, 0.500, -0.500) ) }, + + ------------- + -- GLOBALS -- + ------------- + + + } diff --git a/isaac/launch/isaac_astrobee.launch b/isaac/launch/isaac_astrobee.launch index 2f53eb88..2b5a3cd6 100644 --- a/isaac/launch/isaac_astrobee.launch +++ b/isaac/launch/isaac_astrobee.launch @@ -178,6 +178,15 @@ + + + + + diff --git a/isaac/launch/robot/ILP.launch b/isaac/launch/robot/ILP.launch index 10deca97..182b6920 100644 --- a/isaac/launch/robot/ILP.launch +++ b/isaac/launch/robot/ILP.launch @@ -64,6 +64,18 @@ + + + + + + + + + + + + From 7711867c971bff6d343c77d40bb81bdf80fcbf92 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Tue, 4 Jan 2022 10:23:44 -0800 Subject: [PATCH 21/40] Camera refiner improvements (#18) * Documentation improvements * Minor wording fix * Fix for the order in which localization_node should be used to get the pose * refiner2: add consteraint that depth cloud stays close to mesh * refiner2: Do initial filtering * refiner2: Do initial filtering * refiner2: Filter after each pass by reprojection error and min triangulation angle * Move old orthoproject code to its own file. 
May be of use one day * refiner2: Project images with optimized cameras onto mesh * Refiner2: Minor style changes * geometry_mapper: Can do textures for individual images, for debugging * Refactoring and tuneup * refiner: Allow approximating a fisheye model with one having radial and tangential distortion * Can use a boundary exclusion region for nav cam in calibration * Indentation fix --- astrobee/behaviors/inspection/readme.md | 2 +- astrobee/simulation/acoustics_cam/readme.md | 4 +- dense_map/geometry_mapper/CMakeLists.txt | 4 + .../geometry_mapper/include/dense_map_utils.h | 33 +- .../include/texture_processing.h | 31 +- dense_map/geometry_mapper/readme.md | 359 +-- .../geometry_mapper/src/dense_map_utils.cc | 150 +- .../geometry_mapper/src/texture_processing.cc | 155 +- .../tools/camera_calibrator.cc | 6 +- .../geometry_mapper/tools/camera_refiner.cc | 10 +- .../geometry_mapper/tools/camera_refiner2.cc | 1988 +++++++++++------ .../tools/cameras_to_texrecon.py | 19 +- .../geometry_mapper/tools/geometry_mapper.cc | 11 +- .../geometry_mapper/tools/geometry_mapper.py | 47 +- .../geometry_mapper/tools/image_picker.cc | 55 +- .../geometry_mapper/tools/old_orthoproject.cc | 383 ++++ .../geometry_mapper/tools/orthoproject.cc | 435 +--- dense_map/geometry_mapper/tools/scale_bag.cc | 3 +- .../geometry_mapper/tools/streaming_mapper.cc | 81 +- .../config/dense_map/streaming_mapper.config | 13 +- isaac/readme.md | 10 +- 21 files changed, 2471 insertions(+), 1328 deletions(-) create mode 100644 dense_map/geometry_mapper/tools/old_orthoproject.cc diff --git a/astrobee/behaviors/inspection/readme.md b/astrobee/behaviors/inspection/readme.md index e280c7d6..0fa674c5 100644 --- a/astrobee/behaviors/inspection/readme.md +++ b/astrobee/behaviors/inspection/readme.md @@ -57,4 +57,4 @@ The pictures will be published on the topic They will also show up in the sci cam window in RVIZ. -If requests to take a single picture come at a high rate, some of them will be dropped. 
\ No newline at end of file +If requests to take a single picture come at a high rate, some of them will be dropped. diff --git a/astrobee/simulation/acoustics_cam/readme.md b/astrobee/simulation/acoustics_cam/readme.md index 4f960a17..24660f9d 100644 --- a/astrobee/simulation/acoustics_cam/readme.md +++ b/astrobee/simulation/acoustics_cam/readme.md @@ -85,7 +85,7 @@ intrinsics on topics: By default, the camera takes pictures as often as it can (see the configuration below), which is rarely, in fact, as it is slow. It -listens however to the topic +listens however to the topic: /comm/dds/command @@ -97,7 +97,7 @@ processed. ### Configuration -The behavior of this camera is described in +The behavior of this camera is described in: $ISAAC_WS/src/astrobee/simulation/acoustics_cam/acoustics_cam.json diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 02c2a76d..7ddbc686 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -207,6 +207,10 @@ add_executable(orthoproject tools/orthoproject.cc) target_link_libraries(orthoproject geometry_mapper_lib gflags glog) +add_executable(old_orthoproject tools/old_orthoproject.cc) +target_link_libraries(old_orthoproject + geometry_mapper_lib gflags glog) + add_executable(streaming_mapper tools/streaming_mapper.cc) target_link_libraries(streaming_mapper geometry_mapper_lib gflags glog) diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 8be7dff1..08c83499 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -41,15 +41,24 @@ namespace dense_map { -const int NUM_SCALAR_PARAMS = 1; -const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y -const int NUM_RESIDUALS = 2; // Same as pixel size -const int NUM_XYZ_PARAMS = 3; -const int NUM_RIGID_PARAMS = 7; // (quaternion (4) + translation (3)) 
+const int NUM_SCALAR_PARAMS = 1; // Used to float single-value params // NOLINT +const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y // NOLINT +const int NUM_PIX_PARAMS = 2; // NOLINT +const int NUM_XYZ_PARAMS = 3; // NOLINT +const int NUM_RIGID_PARAMS = 7; // quaternion (4) + translation (3) // NOLINT +const int NUM_AFFINE_PARAMS = 12; // 3x3 matrix (9) + translation (3) // NOLINT // A function to split a string like 'optical_center focal_length' into // its two constituents. -void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set& intrinsics_to_float_set); +void parse_intrinsics_to_float(std::string const& intrinsics_to_float, + std::set& intrinsics_to_float_set); + +// A function to split a string like 'haz_cam sci_cam' into +// its two constituents and validate against the list of known cameras. +void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& depth_to_image_name, + std::string const& extrinsics_to_float, + std::set& extrinsics_to_float_set); // Extract a rigid transform to an array of length NUM_RIGID_PARAMS void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr); @@ -58,6 +67,12 @@ void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr); // transform. Normalize the quaternion to make it into a rotation. void array_to_rigid_transform(Eigen::Affine3d& aff, const double* arr); +void affine_transform_to_array(Eigen::Affine3d const& aff, double* arr); +void array_to_affine_transform(Eigen::Affine3d& aff, const double* arr); + +// Convert a string of values separated by spaces to a vector of doubles. +std::vector string_to_vector(std::string const& str); + // Read a 4x4 pose matrix of doubles from disk void readPoseMatrix(cv::Mat& pose, std::string const& filename); @@ -100,12 +115,14 @@ void updateConfigFile Eigen::Affine3d const& haz_cam_depth_to_image_trans); // NOLINT // Given two poses aff0 and aff1, and 0 <= alpha <= 1, do linear interpolation. 
-Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::Affine3d const& aff1); +Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, + Eigen::Affine3d const& aff1); // Given a set of poses indexed by timestamp in an std::map, find the // interpolated pose at desired timestamp. This is efficient // only for very small maps. Else use the StampedPoseStorage class. -bool findInterpPose(double desired_time, std::map const& poses, Eigen::Affine3d& interp_pose); +bool findInterpPose(double desired_time, std::map const& poses, + Eigen::Affine3d& interp_pose); // A class to store timestamped poses, implementing O(log(n)) linear // interpolation at a desired timestamp. For fast access, keep the diff --git a/dense_map/geometry_mapper/include/texture_processing.h b/dense_map/geometry_mapper/include/texture_processing.h index 65395966..373131d4 100644 --- a/dense_map/geometry_mapper/include/texture_processing.h +++ b/dense_map/geometry_mapper/include/texture_processing.h @@ -259,19 +259,36 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, double num_exclude_boundary_pixels, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + double num_exclude_boundary_pixels, // outputs - std::vector& smallest_cost_per_face, std::vector& face_vec, + std::vector& smallest_cost_per_face, + std::vector& face_vec, std::map& uv_map); // Project texture on a texture model that was pre-filled already, so // only the texture pixel values need to be computed -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, - int num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture); +void 
projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, + double pixel_size, int num_threads, + std::vector const& face_projection_info, + std::vector& texture_atlases, tex::Model& model, + cv::Mat& out_texture); + +void meshProject(mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + cv::Mat const& image, + Eigen::Affine3d const& world_to_cam, + camera::CameraParameters const& cam_params, + int num_exclude_boundary_pixels, + std::string const& out_prefix); // Save a model void isaac_save_model(ObjModel* obj_model, std::string const& prefix); diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 38031f8d..769f3f2b 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -72,10 +72,42 @@ have different invocations for each case. The reader is advised to keep close attention to this, and we will make it clear at every step about which of the two paradigms one refers to. -## Additional sensors present only in simulation +## Sensors present in simulation -A number of robot sensors are present only as part of the simulator. -This is discussed further down the document. +The simulator supports the ``nav_cam``, ``sci_cam``, ``haz_cam`` +cameras, which are analogous to the ones on the real robot, and also +the ``heat_cam`` and ``acoustics_cam`` cameras which exist only in +simulation. All these have been tested with the geometry mapper and +streaming mapper. + +The ``sci_cam`` and ``haz_cam`` cameras are not enabled by default in +the simulator. To enable them, edit the simulation configuration, in + + $ASTROBEE_SOURCE_PATH/astrobee/config/simulation/simulation.config + +and set: + + haz_cam_rate = 1.0; + sci_cam_rate = 1.0; + sci_cam_continuous_picture_taking = true; + +The later will make sure sci cam pictures are taken automatically. 
If +custom behavior is desired, see: + + $ISAAC_WS/src/astrobee/behaviors/inspection/readme.md + +More information about the simulated nav_cam, haz_cam, and sci_cam is +at: + + $ASTROBEE_SOURCE_PATH/simulation/readme.md + +The heat camera is described in: + + $ISAAC_WS/src/astrobee/simulation/isaac_gazebo/readme.md + +The acoustics camera and how to enable it is documented at: + + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md ## Compiling the software @@ -195,41 +227,8 @@ Copy the resulting bag off the robot. ### With simulated data -#### Cameras present in simulation - -The astrobee simulator supports a handful of cameras, including -``nav_cam``, ``sci_cam``, ``haz_cam``, ``heat_cam``, -``acoustics_cam``, etc. All these have been tested with the gemetry -mapper and streaming mapper. - -The ``sci_cam`` and ``haz_cam`` cameras are not enabled by default in -the simulator. To enable them, edit the simulation configuration, in - - $ASTROBEE_SOURCE_PATH/astrobee/config/simulation/simulation.config - -and set: - - haz_cam_rate = 1.0; - sci_cam_rate = 1.0; - sci_cam_continuous_picture_taking = true; - -The later will make sure sci cam pictures are taken automatically. If -custom behavior is desired, see: - - $ISAAC_WS/src/astrobee/behaviors/inspection/readme.md - -More information about the simulated nav_cam, haz_cam, and sci_cam is -at: - - $ASTROBEE_SOURCE_PATH/simulation/readme.md - -The heat camera is described in: - - $ISAAC_WS/src/astrobee/simulation/gazebo/readme.md - -The acoustics camera and how to enable it is documented at: - - $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md +The astrobee simulator supports a handful of cameras, mentioned +earlier in the text. 
#### Recording simulated data @@ -237,7 +236,7 @@ Start the simulator, such as: source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash - roslaunch isaac sim.launch rviz:=true dds:=false \ + roslaunch isaac sim.launch rviz:=true \ pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss In rviz turn on visualizing the desired cameras cameras from the @@ -718,9 +717,10 @@ Parameters: The default is in the usage above. --undistorted_crop_wins: The central region to keep after undistorting an image and before texturing. For sci_cam the - numbers are at 1/4th of the full resolution and will be adjusted - for the actual input image dimensions. Use a list in quotes. The - default is "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160". + numbers are at 1/4th of the full resolution (resolution of + calibration) and will be adjusted for the actual input image + dimensions. Use a list in quotes. The default is + "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160". --haz_cam_points_topic: The depth point cloud topic in the bag file. --start: How many seconds into the bag to start processing the data. --duration: For how many seconds to do the processing. @@ -814,6 +814,9 @@ Parameters: value of nav_cam_to_sci_cam_timestamp_offset from the robot config file with this value. --verbose: If specified, echo all output in the terminal. + --texture_individual_images: If specified, in addition to a joint texture + of all images create individual textures for each image and camera. Does + not work with simulated cameras. For debugging. --save_debug_data: If specified, save many intermediate datasets for debugging. @@ -978,35 +981,56 @@ This node will load the mesh produced by the geometry mapper from If an updated geometry mapper mesh exists, it should be copied to that location first. 
-It will listen to the robot body pose being published at: - - /gnc/ekf - The tool will read the configuration options from: $ISAAC_WS/src/isaac/config/dense_map/streaming_mapper.config -which specifies the camera image to texture. The image topic must be set, -which, for ``sci_cam`` is normally +By default, as specified in that file, it will listen to the robot +body pose being published at: + + /gnc/ekf + +That config file specifies the the camera image to texture, which, by +default, is ``sci_cam``. Its image topic must be set, which, for +``sci_cam`` is normally /hw/cam_sci/compressed -while for other ``haz_cam`` is: +while for ``haz_cam`` is: /hw/depth_haz/extended/amplitude_int -and for other cameras, such as ``nav_cam``, etc., it is of form: +For ``nav_cam`` the image topic is either + + /mgt/img_sampler/nav_cam/image_record + +if the images are played from a bag which recorded the images produced +with the image sampler, or /hw/cam_nav -For the sci cam, it also expects image exposure data on the topic: +if no image sampler was used. The rest of the cameras usually follow +the convention on the line above. + +For the sci cam, the streaming mapper also expects image exposure data +on the topic: /hw/sci_cam_exif -to do exposure correction, unlesss in simulation mode or +to do exposure correction, unless in simulation mode or ``sci_cam_exposure_correction`` is set to false. This is not needed for other cameras. +In order for the streaming mapper to produce texture it should receive +input information with its needed topics (robot or nav_cam pose and +the image to texture), either from a bag or from the robot in real +time. A data bag can be played with the usual command: + + rosbag play data.bag + +See the note further down if it is desired to rename the topics +on which some bag data is published. 
+ The streaming mapper will publish several topics having texture information, which for ``sci_cam`` are: @@ -1016,14 +1040,11 @@ information, which for ``sci_cam`` are: and an analogous convention is followed for other cameras. -In order for the the streaming mapper to produce texture it should -receive input information with its needed topics, either from a bag or -from the robot in real time. - The header.stamp value for each published message will be the same as the header.stamp for the corresponding input camera image. -The data produced by the streaming mapper can be recorded with: +The data produced by the streaming mapper can be recorded (for +sci_cam, and analogously for other cameras) with: rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \ -b 10000 @@ -1037,8 +1058,10 @@ calibration of the IMU sensor in addition to the nav, haz, and sci cam sensors, and very accurate knowledge of the pose of these sensors on the robot body. If that is not the case, it is suggested to use the nav cam pose via the ``nav_cam_pose_topic`` field in -streaming_mapper.config, for which only accurate calibration of the -nav, sci, and haz cameras among each other is assumed. +streaming_mapper.config (set it to ``/loc/ml/features``), for which +only accurate calibration of the nav, sci, and haz cameras among each +other is assumed, while the ``ekf_pose_topic`` must be set to an empty +string. The input texture can be in color or grayscale, at full or reduced resolution, and compressed or not. @@ -1048,57 +1071,13 @@ resolution, and compressed or not. 
If no robot body or nav cam pose information is present, for example, if the EKF or localization node was not running when the image data was acquired, or this data was not recorded or was not reliable, the -localization node can be started together with the streaming mapper by -modifying the above `roslaunch` command as follows: - - export ASTROBEE_ROBOT=bsharp2 - roslaunch isaac isaac_astrobee.launch llp:=disabled \ - nodes:=framestore,localization_node \ - streaming_mapper:=true output:=screen - -and then enabled by running in a separate terminal: - - rosservice call /loc/ml/enable true - -Alternatively, the streaming mapper can be started first, -without localization, as: - - export ASTROBEE_WORLD=iss - export ASTROBEE_ROBOT=bsharp2 # the robot name should be checked - source $ASTROBEE_BUILD_PATH/devel/setup.bash - source $ISAAC_WS/devel/setup.bash - export ISAAC_RESOURCE_DIR=${ISAAC_WS}/src/isaac/resources - export ISAAC_CONFIG_DIR=${ISAAC_WS}/src/isaac/config - roslaunch $ISAAC_WS/src/dense_map/geometry_mapper/launch/streaming_mapper.launch \ - sim_mode:=false output:=screen +localization node can be started together with the streaming mapper, +and this node will provide updated pose information. -Ensure then that nav_cam_pose_topic is set to /loc/ml/features in -streaming_mapper.config and that ekf_state_topic is empty. Also -check the robot name, as earlier. - -Then the localization node can be started separately, as: - - export ASTROBEE_ROBOT=bsharp2 - roslaunch astrobee astrobee.launch llp:=disabled \ - nodes:=framestore,localization_node output:=screen - -with the same environment, including the robot name, and then can be -enabled with a rosservice call as earlier. - -This node will expect the nav cam pose to be published on topic -``/hw/cam_nav``. 
If it is on a different topic, such as -``/mgt/img_sampler/nav_cam/image_record``, it needs to be redirected -to this one when playing the bag, such as: - - rosbag play data.bag \ - /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav \ - /loc/ml/features:=/tmp/features +Edit ``streaming_mapper.config`` and set ``nav_cam_pose_topic`` to +``/loc/ml/features`` and let ``ekf_state_topic`` be empty. -Above the /loc/ml/features topic which may exist in the bag is -redirected to /tmp/features since the currently started localization -node will create new such features. - -The localization node, if needed, will make use of a registered sparse +The localization node will make use of a registered sparse map with BRISK features, histogram equalization, and a vocabulary database to find the nav cam poses. The command for building such a BRISK map from a registered SURF map is: @@ -1126,6 +1105,36 @@ thresholds if the map has too few features. For more details on what the localization node expects, see build_map.md, in the section about running this node on a local machine. +Ensure the same environment as before is present, including the robot name, +and run: + + roslaunch isaac isaac_astrobee.launch mlp:=local llp:=disabled \ + nodes:=framestore,localization_node,localization_manager \ + streaming_mapper:=true output:=screen + +Wait until the textured model is created, which can take a minute. + +Then, in a different terminal, play the bag. The localization node will +expect the nav cam images to be published on topic ``/hw/cam_nav``. 
If +it is on a different topic, such as +``/mgt/img_sampler/nav_cam/image_record``, it needs to be redirected +to this one when playing the bag, such as: + + rosbag play --clock data.bag \ + /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav \ + /loc/ml/features:=/tmp/features /gnc/ekf:=/tmp/ekf + +Above the /loc/ml/features and /gnc/ekf topics which may exist in the +bag are redirected to temporary topics, since the currently started +localization node will create new camera pose information. + +The ``--clock`` option should not be missed. + +Then enable the localization node by running in a separate +terminal: + + rosservice call /loc/ml/enable true + #### The streaming mapper configuration file The ``streaming_mapper.config`` file has following fields: @@ -1140,11 +1149,12 @@ The ``streaming_mapper.config`` file has following fields: /loc/ml/features. It is published by the localization node. - texture_cam_type: The camera that creates the texture images (can be nav_cam, sci_cam, haz_cam, and in simulation also - heat_cam, and acoustics_cam). The default is sci_cam. This + heat_cam and acoustics_cam). The default is sci_cam. This field affects the name of the topics on which the streaming mapper publishes its output. - - texture_cam_topic: The topic having the texture to overlay. - The default value is /hw/cam_sci/compressed. + - texture_cam_topic: The topic having the images (texture) to + overlay. The default value is /hw/cam_sci/compressed and see + note in the text for other cameras. - dist_between_processed_cams: Once an image is textured and published, how far the camera should move (in meters) before it should process another texture (any images arriving @@ -1154,9 +1164,10 @@ The ``streaming_mapper.config`` file has following fields: to adjust for lightning differences, then apply the gamma transform back. This value should be set to the maximum observed ISO * exposure_time. The default is 5.1. 
Not used with simulated
-    data or when sci_cam_exposure_correction if false.
+    data, when sci_cam_exposure_correction is false, or other cameras
+    than sci_cam.
  - sci_cam_exposure_correction: If set to 'true', correct the
-    exposure of sci_cam images.
+    exposure of sci_cam images. Read exposures from /hw/sci_cam_exif.
  - use_single_texture: If set to 'true', use a single texture
    buffer. Sample the images by picking points on each triangle face
    with spacing pixel_size. This can take a couple of minutes to form
@@ -1175,23 +1186,44 @@ simulation.config needs to be edited as described earlier in the
document to turn on the simulated sci cam, haz cam, or other desired
camera.
 
-To launch the streaming mapper, one can do the following:
+When working with ISS data, more specifically the JPM module, do:
+
+    export ASTROBEE_WORLD=iss
+    /bin/cp -fv $ISAAC_WS/src/isaac/resources/geometry/iss_sim.ply \
+      $ISAAC_WS/src/isaac/resources/geometry/${ASTROBEE_WORLD}.ply
+
+to use the simulated mesh for this module.
+
+Edit ``streaming_mapper.config`` and set:
+
+    use_single_texture = false;
+
+Otherwise, the existing simulated mesh has so many triangles that it
+will overwhelm the size of the single buffer which is meant to fit all
+textures.
+ +To launch the streaming mapper, do: source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss - roslaunch isaac sim.launch world:=iss rviz:=true \ - streaming_mapper:=true pose:="11.0 -7.0 5.0 0 0 0 1" + roslaunch isaac sim.launch world:=iss rviz:=true \ + streaming_mapper:=true pose:="11.0 -7.0 5.0 0 0 0 1" \ + output:=screen -Then the robot can be moved in order to create images to texture as: +It is suggested to read the documentation above for working with the +simulator in the context of the geometry mapper for more details, such +as how to ensure the desired camera images are seen in rviz. + +Then, the robot can be moved in order to create images to texture as: rosrun mobility teleop -move -pos "11.0 -5.0 5.0" -tolerance_pos 0.0001 \ -att "0 0 0 1" With simulated data the pose and intrinsics for each camera are -received directly from the simulator, on topics that, for example for +received directly from the simulator, on topics that, for example, for sci_cam, are: /sim/sci_cam/pose @@ -1200,7 +1232,11 @@ sci_cam, are: Hence, the parameters ekf_state_topic, ekf_pose_topic, and nav_cam_pose_topic are ignored. -Note that value of `ASTROBEE_ROBOT` is not needed in this case. +The streaming mapper will publish its results on topics mentioned +earlier in the text. + +Note that value of ``ASTROBEE_ROBOT`` is not needed in this case. Any +user-set value will be overwritten with the robot name ``sim``. 
## Camera refinement @@ -1236,17 +1272,23 @@ sci cam image using the tool: --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ --sci_cam_topic /hw/cam_sci/compressed \ --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \ - --bracket_len 2.0 --output_nav_cam_dir nav_images + --bracket_len 0.6 --output_nav_cam_dir nav_images -Setting up the correct robot name above is very important. +Setting up the correct robot name for ASTROBEE_ROBOT is very important. The --bracket_len option brackets sci cam images by nav cam images so the length of time between these two nav cam images is at most this value. These nav cam images are saved. Later in camera_refiner sci cam and haz cam images are picked in each such a nav cam interval. (A -camera's time is first adjusted for the time offset before any of this -happens.) One may consider using a bracket length of 1.0 seconds if -the bot is known to move quickly right after taking a sci cam picture. +camera's time is first adjusted for the timestamp offset between the +cameras.) + +It is important to note that the bracket length can affect the accuracy +of calibration later, and hence it should be rather tight. Yet a tight +bracket does not allow for wiggle room if later it is desired to tweak +a little the timestamp offsets while still staying within the bounds, +and it may prevent bracketing all the sci cam images and enough haz +cam images. The option --nav_cam_to_sci_cam_offset_override_value can be used if the given bag is suspected to have a different value of this @@ -1259,7 +1301,7 @@ that there should be no excessive overlap otherwise, so the images in the first pair better have about 3/4 or 4/5 overlap with images from other pairs. -If necessary, add more intermediate images manually, or re-running +If necessary, add more intermediate images by re-running this tool with --max_time_between_images @@ -1351,6 +1393,10 @@ Here, $dir points to nav_images as earlier in the document. 
### Running the refiner +TODO(oalexan1): Rewrite this for refiner2. Mention that the choice of +weights 1000, 0, 0 works well, and an extra tune-up can be achieved +for bsharp2 with 20, 20, 20. + Next, the refiner tool is run, as shown below. This will overwrite the camera calibration file, so it may be prudent to start by copying the existing calibration file to a new name, and set ASTROBEE_ROBOT to @@ -1364,7 +1410,7 @@ point to that. source $ISAAC_WS/devel/setup.bash $ISAAC_WS/devel/lib/geometry_mapper/camera_refiner \ --ros_bag mybag.bag --sparse_map mymap.map \ - --num_iterations 20 --bracket_len 2.0 \ + --num_iterations 20 --bracket_len 0.6 \ --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ --output_map output_map.map \ --fix_map --skip_registration --float_scale \ @@ -1374,7 +1420,12 @@ point to that. --mesh mesh.ply --mesh_weight 25 \ --mesh_robust_threshold 3 -Note how we used the same bracket length as in the image picker. +Note how we used the same bracket length as in the image picker. It +is very important to note that a tight bracket should be used, as, if +the robot moves fast and the bracket value is big, it can affect the +accuracy of calibration. Yet a tight bracket does not allow for wiggle +room if it is decided to vary the timestamp offset (see further down) +while staying within the bounds given by the bracket. This tool will print some statistics showing the reprojection residuals for all camera combinations. This can be helpful @@ -1481,6 +1532,50 @@ This program's options are: --verbose: Print the residuals and save the images and match files. Stereo Pipeline's viewer can be used for visualizing these. +### Using the refiner with a radtan model for nav_cam + +The camera refiner supports using a radtan distortion model for +nav_cam, that is a model with radial and and tangenetial distortion, +just like for haz_cam and sci_cam, but the default nav_cam distortion +model is fisheye. 
One can edit the robot config file and replace the
+fisheye model with a desired radial + tangential distortion model (4
+or 5 coefficients are needed) then run the refiner.
+
+Since it is not easy to find a good initial set of such coefficients,
+the refiner has the option of computing such a model which best fits
+the given fisheye model. For that, the refiner is started with the
+fisheye model, this model is used to set up the problem, including
+triangulating the 3D points after feature detection, then the fisheye
+model is replaced on-the-fly with desired 4 or 5 coefficients of the
+radtan model via the option --nav_cam_distortion_replacement, to which
+one can pass, for example, "0 0 0 0". These coefficients will then be
+optimized while keeping the rest of the variables fixed (nav cam focal
+length and optical center, intrinsics of other cameras, and all the
+extrinsics). The new best-fit distortion model will be written to disk
+at the end, replacing the fisheye model, and from then on the new
+model can be used for further calibration experiments just as with the
+fisheye model.
+
+Since it is expected that fitting such a model is harder at the
+periphery, where the distortion is stronger, the camera refiner has
+the option --nav_cam_num_exclude_boundary_pixels, which can be used to
+restrict the nav cam view to a central region of given dimensions when
+such optimization takes place (whether the new model type is fit
+on the fly or read from disk when already determined). If a
+satisfactory solution is found and it is desired to later use the
+geometry mapper with such a model, note its option
+``--undistorted_crop_wins``, and one should keep in mind that the
+restricted region specified earlier may not exactly be the region to
+be used with the geometry mapper, since the former is specified in
+distorted pixels and this one in undistorted pixels.
+
+All this logic was tested and was shown to work in a satisfactory way,
+but no thorough attempt was made at validating that a radtan distortion
+model, while having more degrees of freedom, would out-perform the
+fisheye model. That is rather unlikely, since given sufficiently many
+images with good overlap, the effect of the peripheral region where
+the fisheye lens distortion may not perform perfectly may be small.
+
 ## Orthoprojecting images
 
 Given a camera image, its pose, and a mesh, a useful operation is to
@@ -1506,6 +1601,12 @@ can be run as follows:
     --mesh geom_dir/simplified_mesh.ply \
     --image geom_dir/distorted_sci_cam/1616785318.1400001.jpg \
     --camera_to_world geom_dir/1616785318.1400001_sci_cam_to_world.txt \
-    --output_dir out
+    --num_exclude_boundary_pixels 0 \
+    --output_prefix out
+
+This will write out-1616785318.1400001.obj and its associated files.
+
+Ensure that the correct robot is specified in ASTROBEE_ROBOT.
 
-This will write out/run.obj and its associated files.
+Alternatively, the images and cameras can be specified in lists, via
+``--image_list`` and ``--camera_list``.
diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc
index f4e509ee..203c849e 100644
--- a/dense_map/geometry_mapper/src/dense_map_utils.cc
+++ b/dense_map/geometry_mapper/src/dense_map_utils.cc
@@ -55,6 +55,35 @@ void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set< } }
 
+// A function to split a string like 'haz_cam sci_cam depth_to_image' into
+// its constituents and validate against the list of known cameras. 
+void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& depth_to_image_name, + std::string const& extrinsics_to_float, + std::set& extrinsics_to_float_set) { + extrinsics_to_float_set.clear(); + std::string val; + std::istringstream is(extrinsics_to_float); + while (is >> val) { + extrinsics_to_float_set.insert(val); + + // Sanity check + bool known_cam = false; + for (size_t it = 0; it < cam_names.size(); it++) { + if (val == cam_names[it]) { + known_cam = true; + break; + } + } + if (val == depth_to_image_name) known_cam = true; + + if (!known_cam) + LOG(FATAL) << "Unknown camera: " << val << "\n"; + } + + return; +} + // Extract a rigid transform to an array of length NUM_RIGID_PARAMS void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr) { for (size_t it = 0; it < 3; it++) arr[it] = aff.translation()[it]; @@ -77,6 +106,36 @@ void array_to_rigid_transform(Eigen::Affine3d& aff, const double* arr) { aff = Eigen::Affine3d(Eigen::Translation3d(arr[0], arr[1], arr[2])) * Eigen::Affine3d(R); } +// Extract a affine transform to an array of length NUM_AFFINE_PARAMS +void affine_transform_to_array(Eigen::Affine3d const& aff, double* arr) { + Eigen::MatrixXd M = aff.matrix(); + int count = 0; + // The 4th row always has 0, 0, 0, 1 + for (int row = 0; row < 3; row++) { + for (int col = 0; col < 4; col++) { + arr[count] = M(row, col); + count++; + } + } +} + +// Convert an array of length NUM_AFFINE_PARAMS to a affine +// transform. Normalize the quaternion to make it into a rotation. 
+void array_to_affine_transform(Eigen::Affine3d& aff, const double* arr) { + Eigen::MatrixXd M = Eigen::Matrix::Identity(); + + int count = 0; + // The 4th row always has 0, 0, 0, 1 + for (int row = 0; row < 3; row++) { + for (int col = 0; col < 4; col++) { + M(row, col) = arr[count]; + count++; + } + } + + aff.matrix() = M; +} + // Read a 4x4 pose matrix of doubles from disk void readPoseMatrix(cv::Mat& pose, std::string const& filename) { pose = cv::Mat::zeros(4, 4, CV_64F); @@ -453,13 +512,23 @@ double fileNameToTimestamp(std::string const& file_name) { // Create a directory unless it exists already void createDir(std::string const& dir) { - if (!boost::filesystem::create_directories(dir) && !boost::filesystem::exists(dir)) { - LOG(FATAL) << "Failed to create directory: " << dir; + if (!boost::filesystem::create_directories(dir) && !boost::filesystem::is_directory(dir)) { + LOG(FATAL) << "Failed to create directory: " << dir << "\n"; } } // Minor utilities for converting values to a string below +// Convert a string of values separated by spaces to a vector of doubles. 
+std::vector string_to_vector(std::string const& str) { + std::istringstream iss(str); + std::vector vals; + double val; + while (iss >> val) + vals.push_back(val); + return vals; +} + std::string number_to_string(double val) { std::ostringstream oss; oss.precision(8); @@ -555,6 +624,44 @@ int robust_find(std::string const& text, std::string const& val, int beg) { return beg; } + +// A fragile function to determine if the value of the given parameter has a brace +bool param_val_has_braces(std::string const& param_name, std::string const& parent, + // The text to search + std::string const& text) { + int beg = 0; + + // First find the parent, if provided + if (parent != "") { + beg = robust_find(text, parent, beg); + if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" + << parent << " =' in the config file."; + } + + // Find the param after the parent + beg = robust_find(text, param_name, beg); + if (beg == std::string::npos) { + std::string msg; + if (parent != "") msg = " Tried to locate it after field '" + parent + "'."; + LOG(FATAL) << "Could not find the field '" << param_name << " =' in the config file." << msg; + } + + // Now we are positioned after the equal sign + bool has_brace = false; + while (beg < static_cast(text.size())) { + if (text[beg] == ' ') { + beg++; + continue; + } + + if (text[beg] == '{') has_brace = true; + + break; + } + + return has_brace; +} + // Replace a given parameter's value in the text. This is very fragile // code, particularly it does not understand that text starting with // "--" is a comment. 
@@ -568,7 +675,8 @@ void replace_param_val(std::string const& param_name, std::string const& param_v // First find the parent, if provided if (parent != "") { beg = robust_find(text, parent, beg); - if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" << parent << " =' in the config file."; + if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" + << parent << " =' in the config file."; } // Find the param after the parent @@ -586,7 +694,8 @@ void replace_param_val(std::string const& param_name, std::string const& param_v beg = text.find(beg_brace, beg); if (beg == std::string::npos) { - LOG(FATAL) << "Failed to replace value for " << parent << " " << param_name << " in the config file."; + LOG(FATAL) << "Failed to replace value for " << parent << " " << param_name + << " in the config file."; } end = beg + 1; @@ -603,10 +712,10 @@ void replace_param_val(std::string const& param_name, std::string const& param_v } } else { - // No braces, then just look for the next comma - end = text.find(",", beg); + // No braces, then just look for the next comma or newline + end = std::min(text.find(",", beg), text.find("\n", beg)); if (beg == std::string::npos) LOG(FATAL) << "Could not parse correctly " << param_name; - end--; // go before the comma + end--; // go before the found character } text = text.replace(beg, end - beg + 1, param_val); @@ -651,15 +760,26 @@ void updateConfigFile params.GetOpticalOffset()); replace_param_val("intrinsic_matrix", intrinsics, cam_name, "{", "}", text); Eigen::VectorXd cam_distortion = params.GetDistortion(); - if (cam_distortion.size() > 1) { - std::string distortion_str = vector_to_string(cam_distortion); - replace_param_val("distortion_coeff", distortion_str, cam_name, "{", "}", text); - } else if (cam_distortion.size() == 1) { - std::string distortion_str = " " + number_to_string(cam_distortion[0]); - replace_param_val("distortion_coeff", distortion_str, cam_name, "", "", text); - } else { + + // 
This can switch the distortion from being a number to being a vector, + // and vice-versa, which makes the logic more complicated. + std::string distortion_str; + if (cam_distortion.size() > 1) + distortion_str = vector_to_string(cam_distortion); + else if (cam_distortion.size() == 1) + distortion_str = number_to_string(cam_distortion[0]); + else LOG(FATAL) << "Camera " << cam_name << " must have distortion."; - } + + // Note that we look at whether there are braces around param val + // before replacement, rather than if the replacement param val + // has braces. + if (param_val_has_braces("distortion_coeff", cam_name, text)) + replace_param_val("distortion_coeff", distortion_str, cam_name, "{", "}", text); + else + replace_param_val("distortion_coeff", " " + distortion_str, cam_name, "", "", text); + + // Next deal with extrinsics if (cam_names[it] == "nav_cam") continue; // this will have the trivial transforms diff --git a/dense_map/geometry_mapper/src/texture_processing.cc b/dense_map/geometry_mapper/src/texture_processing.cc index 7f0ea9e8..595784ff 100644 --- a/dense_map/geometry_mapper/src/texture_processing.cc +++ b/dense_map/geometry_mapper/src/texture_processing.cc @@ -283,7 +283,8 @@ math::Vec3f eigen_to_vec3f(Eigen::Vector3d const& V) { return v; } -void calculate_texture_size(double height_factor, std::list const& texture_patches, +void calculate_texture_size(double height_factor, + std::list const& texture_patches, int& texture_width, int& texture_height) { int64_t total_area = 0; int max_width = 0; @@ -849,11 +850,6 @@ void formMtl(std::string const& out_prefix, std::string& mtl_str) { ofs << "Ns 0.000000\n"; ofs << "map_Kd " << out_prefix << ".png\n"; mtl_str = ofs.str(); - - std::string mtl_file = out_prefix + ".mtl"; - std::ofstream ofs2(mtl_file.c_str()); - ofs2 << mtl_str; - ofs2.close(); } void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& face_vec, @@ -944,11 +940,48 @@ void formObj(tex::Model& texture_model, 
std::string const& out_prefix, std::stri obj_str = out.str(); } +// The images from the bag may need to be resized to be the same +// size as in the calibration file. Sometimes the full-res images +// can be so blurry that interest point matching fails, hence the +// resizing. +// Similar logic to deal with differences between image size and calibrated size +// is used further down this code. +void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || + (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows + << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. 
+ cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; +} + // Project texture and find the UV coordinates -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, double num_exclude_boundary_pixels, // outputs - std::vector& smallest_cost_per_face, std::vector& face_vec, + std::vector& smallest_cost_per_face, + std::vector& face_vec, std::map& uv_map) { // Wipe the outputs face_vec.clear(); @@ -966,9 +999,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b int factor = raw_image_cols / calib_image_cols; - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Published image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " << calib_image_cols << ' ' << calib_image_rows << "\n" + if ((raw_image_cols != calib_image_cols * factor) || + (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Published image width and height are: " + << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" << "These must be equal up to an integer factor.\n"; } @@ -979,12 +1015,14 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; + if (vertices.size() != 
mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; std::vector const& faces = mesh->get_faces(); std::vector const& face_normals = mesh->get_face_normals(); - if (smallest_cost_per_face.size() != faces.size()) LOG(FATAL) << "There must be one cost value per face."; + if (smallest_cost_per_face.size() != faces.size()) + LOG(FATAL) << "There must be one cost value per face."; #pragma omp parallel for for (std::size_t face_id = 0; face_id < faces.size() / 3; face_id++) { @@ -1053,7 +1091,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b } // Get the undistorted pixel - Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d undist_centered_pix = + cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); if (std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] || std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) { // If we are out of acceptable undistorted region, there's some uncertainty whether @@ -1064,7 +1103,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Get the distorted pixel value Eigen::Vector2d dist_pix; - cam.GetParameters().Convert(undist_centered_pix, &dist_pix); + cam.GetParameters().Convert + (undist_centered_pix, &dist_pix); // Skip pixels that don't project in the image if (dist_pix.x() < num_exclude_boundary_pixels || @@ -1107,10 +1147,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Project texture using a texture model that was already pre-filled, so // just update pixel values -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& 
image, camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, double pixel_size, int num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture) { + std::vector& texture_atlases, tex::Model& model, + cv::Mat& out_texture) { omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(num_threads); // Use this many threads for all // consecutive parallel regions @@ -1128,9 +1170,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b int calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; int factor = raw_image_cols / calib_image_cols; - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Published image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " << calib_image_cols << ' ' << calib_image_rows << "\n" + if ((raw_image_cols != calib_image_cols * factor) || + (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Published image width and height are: " + << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" << "These must be equal up to an integer factor.\n"; } @@ -1143,18 +1188,21 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Blank the image texture->fill(0); - if (texture->channels() != NUM_CHANNELS) throw util::Exception("Wrong number of channels in the texture image."); + if (texture->channels() != NUM_CHANNELS) + throw util::Exception("Wrong number of channels in the texture image."); Eigen::Vector3d cam_ctr = cam.GetPosition(); std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex 
normals."; + if (vertices.size() != mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; std::vector const& faces = mesh->get_faces(); std::vector const& face_normals = mesh->get_face_normals(); - if (smallest_cost_per_face.size() != faces.size()) LOG(FATAL) << "There must be one cost value per face."; + if (smallest_cost_per_face.size() != faces.size()) + LOG(FATAL) << "There must be one cost value per face."; #pragma omp parallel for for (std::size_t face_id = 0; face_id < faces.size() / 3; face_id++) { @@ -1222,7 +1270,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b } // Get the undistorted pixel - Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d undist_centered_pix + = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); if (std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] || std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) { // If we are out of acceptable undistorted region, there's some uncertainty whether @@ -1233,7 +1282,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Get the distorted pixel value Eigen::Vector2d dist_pix; - cam.GetParameters().Convert(undist_centered_pix, &dist_pix); + cam.GetParameters().Convert(undist_centered_pix, &dist_pix); // Skip pixels that don't project in the image if (dist_pix.x() < 0 || dist_pix.x() > calib_image_cols - 1 || dist_pix.y() < 0 || @@ -1360,4 +1410,57 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // std::cout << "Projecting took: " << timer.get_elapsed()/1000.0 << " // seconds\n"; } + +// Project and save a mesh as an obj file to out_prefix.obj, +// out_prefix.mtl, out_prefix.png. 
+void meshProject(mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + cv::Mat const& image, + Eigen::Affine3d const& world_to_cam, + camera::CameraParameters const& cam_params, + int num_exclude_boundary_pixels, + std::string const& out_prefix) { + // Create the output directory, if needed + std::string out_dir = boost::filesystem::path(out_prefix).parent_path().string(); + if (out_dir != "") dense_map::createDir(out_dir); + + std::vector face_vec; + std::map uv_map; + + std::vector const& faces = mesh->get_faces(); + int num_faces = faces.size(); + std::vector smallest_cost_per_face(num_faces, 1.0e+100); + + camera::CameraModel cam(world_to_cam, cam_params); + + // Find the UV coordinates and the faces having them + dense_map::projectTexture(mesh, bvh_tree, image, cam, num_exclude_boundary_pixels, + smallest_cost_per_face, face_vec, uv_map); + + // Strip the directory name, according to .obj file conventions. + std::string suffix = boost::filesystem::path(out_prefix).filename().string(); + + std::string obj_str; + dense_map::formObjCustomUV(mesh, face_vec, uv_map, suffix, obj_str); + + std::string mtl_str; + dense_map::formMtl(suffix, mtl_str); + + std::string obj_file = out_prefix + ".obj"; + std::cout << "Writing: " << obj_file << std::endl; + std::ofstream obj_handle(obj_file); + obj_handle << obj_str; + obj_handle.close(); + + std::string mtl_file = out_prefix + ".mtl"; + std::cout << "Writing: " << mtl_file << std::endl; + std::ofstream mtl_handle(mtl_file); + mtl_handle << mtl_str; + mtl_handle.close(); + + std::string texture_file = out_prefix + ".png"; + std::cout << "Writing: " << texture_file << std::endl; + cv::imwrite(texture_file, image); +} + } // namespace dense_map diff --git a/dense_map/geometry_mapper/tools/camera_calibrator.cc b/dense_map/geometry_mapper/tools/camera_calibrator.cc index 320f6fe3..7182ec14 100644 --- a/dense_map/geometry_mapper/tools/camera_calibrator.cc +++ 
b/dense_map/geometry_mapper/tools/camera_calibrator.cc @@ -232,7 +232,7 @@ struct IntrinsicsError { new IntrinsicsError(pix, xyz, block_sizes, orig_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -318,7 +318,7 @@ struct IntrinsicsDepthError { new IntrinsicsDepthError(pix, xyz, block_sizes, orig_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -395,7 +395,7 @@ struct ExtrinsicsError { new ExtrinsicsError(target_corner_pixel, target_corner_meas, interp_world_to_cam1, block_sizes, cam2_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 2c3e83e1..207afb2c 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -240,7 +240,7 @@ struct DepthToHazError { new DepthToHazError(nav_pix, depth_xyz, block_sizes, haz_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -303,7 +303,7 @@ struct NavError { new ceres::DynamicNumericDiffCostFunction(new NavError(nav_pix, block_sizes, nav_cam_params)); // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -406,7 +406,7 @@ struct DepthToNavError { new DepthToNavError(nav_pix, depth_xyz, alpha, match_left, block_sizes, nav_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -528,7 +528,7 @@ struct DepthToSciError { new DepthToSciError(dist_sci_pix, depth_xyz, alpha_haz, alpha_sci, block_sizes, sci_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -629,7 +629,7 @@ struct SciError { new SciError(dist_sci_pix, alpha, block_sizes, sci_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc index b61012b3..65e38b8c 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner2.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner2.cc @@ -17,18 +17,9 @@ * under the License. */ -// TODO(oalexan1): Consider adding a haz cam to haz cam -// reprojection error in the camera refiner. There will be more -// haz to haz matches than haz to nav or haz to sci. -// TODO(oalexan1): Must document that the cloud timestamp -// that is looked up is what is closest to image timestamp. 
-// TODO(oalexan1): What if the wrong cloud is looked up -// for given image? Or if the delay is too big? - -// TODO(oalexan1): Must test the DepthError with no bracketing. - -// TODO(oalexan1): Move this to utils -// Get rid of warning beyond our control +// TODO(oalexan1): Move these OpenMVG headers to dense_map_utils.cc, +// and do forward declarations in dense_map_utils.h. Get rid of +// warnings beyond our control #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #pragma GCC diagnostic ignored "-Wunused-function" #pragma GCC diagnostic push @@ -109,7 +100,7 @@ DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); -DEFINE_int32(num_overlaps, 20, "How many images (of all camera types) close and forward in " +DEFINE_int32(num_overlaps, 10, "How many images (of all camera types) close and forward in " "time to match to given image."); DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, @@ -125,11 +116,13 @@ DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calib DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " "less than this."); -DEFINE_double(bracket_len, 2.0, +DEFINE_double(bracket_len, 0.6, "Lookup sci and haz cam images only between consecutive nav cam images " "whose distance in time is no more than this (in seconds), after adjusting " "for the timestamp offset between these cameras. It is assumed the robot " - "moves slowly and uniformly during this time."); + "moves slowly and uniformly during this time. A large value here will " + "make the refiner compute a poor solution but a small value will prevent " + "enough images being bracketed."); DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); @@ -137,11 +130,17 @@ DEFINE_string(sci_cam_timestamps, "", "Use only these sci cam timestamps. 
Must be " "a file with one timestamp per line."); -DEFINE_bool(float_sparse_map, false, "Optimize the sparse map, hence only the camera params."); +DEFINE_bool(float_sparse_map, false, + "Optimize the sparse map. This should be avoided as it can invalidate the scales " + "of the extrinsics and the registration. It should at least be used with big mesh " + "weights to attenuate those effects."); DEFINE_bool(float_scale, false, - "If to optimize the scale of the clouds (use it if the " - "sparse map is kept fixed), or else rescaling and registration is needed."); + "If to optimize the scale of the clouds, part of haz_cam_depth_to_image_transform " + "(use it if the sparse map is kept fixed, or else rescaling and registration " + "of the map and extrinsics is needed). This parameter should not be used with " + "--affine_depth_to_image when the transform is affine, rather than rigid and a scale." + "See also --extrinsics_to_float."); DEFINE_bool(float_timestamp_offsets, false, "If to optimize the timestamp offsets among the cameras."); @@ -165,81 +164,126 @@ DEFINE_string(sci_cam_intrinsics_to_float, "", "optical_center, distortion. Specify as a quoted list. " "For example: 'focal_length optical_center'."); +DEFINE_string(extrinsics_to_float, "haz_cam sci_cam depth_to_image", + "Specify the cameras whose extrinsics, relative to nav_cam, to optimize. Also " + "consider if to float haz_cam_depth_to_image_transform."); + DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); -DEFINE_double(depth_weight, 10.0, - "The weight to give to depth measurements. Use a bigger number as " - "depth errors are usually small fractions of a meter."); +DEFINE_double(depth_tri_weight, 1000.0, + "The weight to give to the constraint that depth measurements agree with " + "triangulated points. 
Use a bigger number as depth errors are usually on the " + "order of 0.01 meters while reprojection errors are on the order of 1 pixel."); DEFINE_string(mesh, "", - "Refine the sci cam so that the sci cam texture agrees with the nav cam " - "texture when projected on this mesh."); + "Use this geometry mapper mesh from a previous run to help constrain " + "the calibration."); + +DEFINE_double(mesh_tri_weight, 0.0, + "A larger value will give more weight to the constraint that triangulated points. " + "stay close to a preexisting mesh. Not suggested by default."); + +DEFINE_double(depth_mesh_weight, 0.0, + "A larger value will give more weight to the constraint that the depth clouds " + "stay close to the mesh. Not suggested by default."); + +DEFINE_bool(affine_depth_to_image, false, "Assume that the depth_to_image_transform " + "for each depth + image camera is an arbitrary affine transform rather than a " + "rotation times a scale."); + +DEFINE_int32(refiner_num_passes, 2, "How many passes of optimization to do. Outliers will be " + "removed after every pass. Each pass will start with the previously optimized " + "solution as an initial guess. Mesh intersections (if applicable) and ray " + "triangulation will be recomputed before each pass."); + +DEFINE_double(initial_max_reprojection_error, 300.0, "If filtering outliers, remove interest " + "points for which the reprojection error, in pixels, is larger than this. This " + "filtering happens when matches are created, before cameras are optimized, and " + "a big value should be used if the initial cameras are not trusted."); + +DEFINE_double(max_reprojection_error, 25.0, "If filtering outliers, remove interest points for " + "which the reprojection error, in pixels, is larger than this. This filtering " + "happens after the optimization pass finishes unless disabled. 
It is better to not " + "filter too aggressively unless confident of the solution."); + +DEFINE_double(refiner_min_angle, 0.5, "If filtering outliers, remove triangulated points " + "for which all rays converging to it make an angle (in degrees) less than this." + "Note that some cameras in the rig may be very close to each other relative to " + "the points the rig images, so care is needed here."); + +DEFINE_bool(refiner_skip_filtering, false, "Do not do any outlier filtering."); + +DEFINE_double(min_ray_dist, 0.0, "The minimum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); + +DEFINE_double(max_ray_dist, 100.0, "The maximum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); -DEFINE_double(mesh_weight, 25.0, - "A larger value will give more weight to the mesh constraint. " - "Use a bigger number as depth errors are usually small fractions of a meter."); +DEFINE_string(out_texture_dir, "", "If non-empty and if an input mesh was provided, " + "project the camera images using the optimized poses onto the mesh " + "and write the obtained .obj files in the given directory."); DEFINE_bool(verbose, false, "Print the residuals and save the images and match files." "Stereo Pipeline's viewer can be used for visualizing these."); +DEFINE_string(nav_cam_distortion_replacement, "", + "Replace nav_cam's distortion coefficients with this list after the initial " + "determination of triangulated points, and then continue with distortion " + "optimization. A quoted list of four or five values expected, separated by " + "spaces, as the replacement distortion model will have radial and tangential " + "coefficients. 
Set a positive --nav_cam_num_exclude_boundary_pixels."); + +DEFINE_int32(nav_cam_num_exclude_boundary_pixels, 0, + "Flag as outliers nav cam pixels closer than this to the boundary, and ignore " + "that boundary region when texturing with the --out_texture_dir option. " + "This may improve the calibration accuracy, especially if switching from fisheye " + "to radtan distortion for nav_cam. See also the geometry_mapper " + "--undistorted_crop_wins option."); + namespace dense_map { - // Calculate interpolated world to camera trans - Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, - const double* end_world_to_ref_t, - const double* ref_to_cam_trans, - double beg_ref_stamp, - double end_ref_stamp, - double ref_to_cam_offset, - double cam_stamp) { - Eigen::Affine3d beg_world_to_ref_aff; - array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); - - Eigen::Affine3d end_world_to_ref_aff; - array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); - - Eigen::Affine3d ref_to_cam_aff; - array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); - - // std::cout.precision(18); - // std::cout << "--beg stamp " << beg_ref_stamp << std::endl; - // std::cout << "--end stamp " << end_ref_stamp << std::endl; - // std::cout << "cam stamp " << cam_stamp << std::endl; - // std::cout.precision(18); - // std::cout << "ref to cam off " << ref_to_cam_offset << std::endl; - // std::cout << "--ref to cam trans\n" << ref_to_cam_aff.matrix() << std::endl; - - // Covert from cam time to ref time and normalize. It is very - // important that below we subtract the big numbers from each - // other first, which are the timestamps, then subtract whatever - // else is necessary. Otherwise we get problems with numerical - // precision with CERES. 
- double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) - / (end_ref_stamp - beg_ref_stamp); - - if (beg_ref_stamp == end_ref_stamp) - alpha = 0.0; // handle division by zero - - // std::cout << "--alpha " << alpha << std::endl; - if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; - - // Interpolate at desired time - Eigen::Affine3d interp_world_to_ref_aff - = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); - - Eigen::Affine3d interp_world_to_cam_afff = ref_to_cam_aff * interp_world_to_ref_aff; - - // std::cout << "final trans\n" << interp_world_to_cam_afff.matrix() << std::endl; - - return interp_world_to_cam_afff; - } +// Calculate interpolated world to camera trans +Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, + const double* end_world_to_ref_t, + const double* ref_to_cam_trans, + double beg_ref_stamp, + double end_ref_stamp, + double ref_to_cam_offset, + double cam_stamp) { + Eigen::Affine3d beg_world_to_ref_aff; + array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); + + Eigen::Affine3d end_world_to_ref_aff; + array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); + + Eigen::Affine3d ref_to_cam_aff; + array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); + + // Covert from cam time to ref time and normalize. It is very + // important that below we subtract the big numbers from each + // other first, which are the timestamps, then subtract whatever + // else is necessary. Otherwise we get problems with numerical + // precision with CERES. + double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) + / (end_ref_stamp - beg_ref_stamp); - // TODO(oalexan1): Store separately matches which end up being - // squashed in a pid_cid_to_fid. 
+ if (beg_ref_stamp == end_ref_stamp) + alpha = 0.0; // handle division by zero + + if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; + + // Interpolate at desired time + Eigen::Affine3d interp_world_to_ref_aff + = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); + + Eigen::Affine3d interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff; + + return interp_world_to_cam_aff; +} ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { // Convert to lower-case @@ -288,7 +332,6 @@ struct BracketedCamError { } // Set the correct distortion size. This cannot be done in the interface for now. - // TODO(oalexan1): Make individual block sizes for each camera, then this issue will go away. m_block_sizes[7] = m_cam_params.GetDistortion().size(); } @@ -304,7 +347,6 @@ struct BracketedCamError { // World point Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // std::cout << "--bracketX is " << X.transpose() << std::endl; // Make a deep copy which we will modify camera::CameraParameters cam_params = m_cam_params; @@ -316,15 +358,9 @@ struct BracketedCamError { cam_params.SetOpticalOffset(optical_center); cam_params.SetDistortion(distortion); - // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; - // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; - // std::cout << "-dist " << distortion.transpose() << std::endl; - // Convert world point to given cam coordinates X = world_to_cam_trans * X; - // std::cout << "--trans X " << X.transpose() << std::endl; - // Project into the image Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); Eigen::Vector2d curr_dist_pix; @@ -334,7 +370,6 @@ struct BracketedCamError { residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << 
std::endl; return true; } @@ -348,7 +383,7 @@ struct BracketedCamError { (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, cam_stamp, block_sizes, cam_params)); - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add, except // for distortion, which is last @@ -394,9 +429,6 @@ struct RefCamError { } // Set the correct distortion size. This cannot be done in the interface for now. - - // TODO(oalexan1): Make individual block sizes for each camera, - // then this issue will go away. m_block_sizes[4] = m_cam_params.GetDistortion().size(); } @@ -408,7 +440,6 @@ struct RefCamError { // World point Eigen::Vector3d X; for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; - // std::cout << "--refX is " << X.transpose() << std::endl; // Make a deep copy which we will modify camera::CameraParameters cam_params = m_cam_params; @@ -420,15 +451,9 @@ struct RefCamError { cam_params.SetOpticalOffset(optical_center); cam_params.SetDistortion(distortion); - // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; - // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; - // std::cout << "-dist " << distortion.transpose() << std::endl; - // Convert world point to given cam coordinates X = world_to_ref_t * X; - // std::cout << "--trans X " << X.transpose() << std::endl; - // Project into the image Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); Eigen::Vector2d curr_dist_pix; @@ -438,7 +463,6 @@ struct RefCamError { residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; return true; } @@ -452,7 +476,7 @@ struct RefCamError { (new RefCamError(meas_dist_pix, block_sizes, cam_params)); // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add, except // for distortion, which is last @@ -492,7 +516,7 @@ struct BracketedDepthError { m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || - m_block_sizes[3] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || m_block_sizes[4] != NUM_SCALAR_PARAMS || m_block_sizes[5] != NUM_XYZ_PARAMS || m_block_sizes[6] != NUM_SCALAR_PARAMS) { @@ -513,34 +537,27 @@ struct BracketedDepthError { // The current transform from the depth point cloud to the camera image Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[3]); + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); // Apply the scale double depth_to_image_scale = parameters[4][0]; depth_to_image.linear() *= depth_to_image_scale; - // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; - - // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; // Convert from depth cloud coordinates to cam coordinates Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - // std::cout << "--image meas pt " << M.transpose() << std::endl; - // Convert to world coordinates M = world_to_cam_trans.inverse() * M; - // std::cout << "--depth in world coords " << M.transpose() << std::endl; // Triangulated world point Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); - // std::cout << "--triangulated X is " << X.transpose() << std::endl; - - // std::cout << "--weight " << m_weight << std::endl; // Compute the residuals for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { residuals[it] = m_weight * (X[it] - M[it]); - // std::cout << "--residual " << 
residuals[it] << std::endl; } return true; @@ -572,6 +589,102 @@ struct BracketedDepthError { std::vector m_block_sizes; }; // End class BracketedDepthError +// An error function minimizing the product of a given weight and the +// error between a mesh point and a transformed measured depth point. The +// depth point needs to be transformed to world coordinates first. For +// that one has to do pose interpolation. +struct BracketedDepthMeshError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + BracketedDepthMeshError(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_mesh_xyz(mesh_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 6 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthMeshError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+ bool operator()(double const* const* parameters, double* residuals) const { + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[5][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); + + // Apply the scale + double depth_to_image_scale = parameters[4][0]; + depth_to_image.linear() *= depth_to_image_scale; + + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; + + // Convert to world coordinates + M = world_to_cam_trans.inverse() * M; + + // Compute the residuals + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (m_mesh_xyz[it] - M[it]); + } + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthMeshError(weight, meas_depth_xyz, mesh_xyz, + left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); + + // The residual size is always the same. 
+ cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + for (size_t i = 0; i < block_sizes.size(); i++) + cost_function->AddParameterBlock(block_sizes[i]); + + return cost_function; + } + + private: + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + Eigen::Vector3d m_mesh_xyz; // Point on preexisting mesh + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp + std::vector m_block_sizes; +}; // End class BracketedDepthMeshError + // An error function minimizing the product of a given weight and the // error between a triangulated point and a measured depth point for // the ref camera. The depth point needs to be transformed to world @@ -584,8 +697,10 @@ struct RefDepthError { m_meas_depth_xyz(meas_depth_xyz), m_block_sizes(block_sizes) { // Sanity check - if (m_block_sizes.size() != 4 || m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_SCALAR_PARAMS || + if (m_block_sizes.size() != 4 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + (m_block_sizes[1] != NUM_RIGID_PARAMS && m_block_sizes[1] != NUM_AFFINE_PARAMS) || + m_block_sizes[2] != NUM_SCALAR_PARAMS || m_block_sizes[3] != NUM_XYZ_PARAMS) { LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; } @@ -599,34 +714,27 @@ struct RefDepthError { // The current transform from the depth point cloud to the camera image Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[1]); + if (m_block_sizes[1] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[1]); + else + array_to_rigid_transform(depth_to_image, parameters[1]); // Apply the scale double depth_to_image_scale = parameters[2][0]; depth_to_image.linear() *= depth_to_image_scale; - // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; - - // std::cout << 
"--meas pt " << m_meas_depth_xyz.transpose() << std::endl; // Convert from depth cloud coordinates to cam coordinates Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - // std::cout << "--image meas pt " << M.transpose() << std::endl; - // Convert to world coordinates M = world_to_cam.inverse() * M; - // std::cout << "--depth in world coords " << M.transpose() << std::endl; // Triangulated world point Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // std::cout << "--triangulated X is " << X.transpose() << std::endl; - - // std::cout << "--weight " << m_weight << std::endl; // Compute the residuals for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { residuals[it] = m_weight * (X[it] - M[it]); - // std::cout << "--residual " << residuals[it] << std::endl; } return true; @@ -669,7 +777,8 @@ struct XYZError { // TODO(oalexan1): May want to use the analytical Ceres cost function bool operator()(double const* const* parameters, double* residuals) const { // Compute the residuals - for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + for (int it = 0; it < NUM_XYZ_PARAMS; it++) + residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); return true; } @@ -679,7 +788,8 @@ struct XYZError { std::vector const& block_sizes, double weight) { ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + new ceres::DynamicNumericDiffCostFunction + (new XYZError(ref_xyz, block_sizes, weight)); // The residual size is always the same cost_function->SetNumResiduals(NUM_XYZ_PARAMS); @@ -697,8 +807,6 @@ struct XYZError { double m_weight; }; // End class XYZError -enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; - // Calculate the rmse residual for each residual type. 
void calc_median_residuals(std::vector const& residuals, std::vector const& residual_names, @@ -715,7 +823,7 @@ void calc_median_residuals(std::vector const& residuals, for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]].push_back(std::abs(residuals[it])); - std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + std::cout << "The 25, 50, 75, and 100th percentile residual stats " << tag << std::endl; for (auto it = stats.begin(); it != stats.end(); it++) { std::string const& name = it->first; std::vector vals = stats[name]; // make a copy @@ -726,44 +834,18 @@ void calc_median_residuals(std::vector const& residuals, int it1 = static_cast(0.25 * len); int it2 = static_cast(0.50 * len); int it3 = static_cast(0.75 * len); + int it4 = static_cast(len - 1); if (len == 0) std::cout << name << ": " << "none"; else std::cout << std::setprecision(8) - << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3]; + << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' + << vals[it3] << ' ' << vals[it4]; std::cout << " (" << len << " residuals)" << std::endl; } } - // TODO(oalexan1): This needs to be handled better. - void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { - int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam_params.GetDistortedSize()[0]; - int calib_image_rows = cam_params.GetDistortedSize()[1]; - - int factor = raw_image_cols / calib_image_cols; - - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " - << calib_image_cols << ' ' << calib_image_rows << "\n" - << "These must be equal up to an integer factor.\n"; - } - - if (factor != 1) { - // TODO(oalexan1): This kind of resizing may be creating aliased images. 
- cv::Mat local_image; - cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); - local_image.copyTo(image); - } - - // Check - if (image.cols != calib_image_cols || image.rows != calib_image_rows) - LOG(FATAL) << "Sci cam images have the wrong size."; - } - typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; // A class to encompass all known information about a camera @@ -775,12 +857,17 @@ void calc_median_residuals(std::vector const& residuals, int camera_type; // The timestamp for this camera (in floating point seconds since epoch) + // measured by the clock/cpu which is particular to this camera. double timestamp; // The timestamp with an adjustment added to it to be in // reference camera time double ref_timestamp; + // The timestamp of the closest cloud for this image, measured + // with the same clock as the 'timestamp' value. + double cloud_timestamp; + // Indices to look up the reference cameras bracketing this camera // in time. The two indices will have same value if and only if // this is a reference camera. @@ -790,11 +877,12 @@ void calc_median_residuals(std::vector const& residuals, // The image for this camera, in grayscale cv::Mat image; - // The corresponding depth cloud, for an image + depth camera + // The corresponding depth cloud, for an image + depth camera. + // Will be empty and uninitialized for a camera lacking depth. cv::Mat depth_cloud; }; - // Sort by timestamps in the ref camera clock + // Sort by timestamps adjusted to be relative to the ref camera clock bool timestampLess(cameraImage i, cameraImage j) { return (i.ref_timestamp < j.ref_timestamp); } @@ -830,6 +918,45 @@ void calc_median_residuals(std::vector const& residuals, return true; } + // Project given images with optimized cameras onto mesh. It is + // assumed that the most up-to-date cameras were copied/interpolated + // form the optimizer structures into the world_to_cam vector. 
+ void meshProjectCameras(std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& cam_images, + std::vector const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + int ref_camera_type, int nav_cam_num_exclude_boundary_pixels, + std::string const& out_dir) { + if (cam_names.size() != cam_params.size()) + LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n"; + if (cam_images.size() != world_to_cam.size()) + LOG(FATAL) << "There must be as many camera images as camera poses.\n"; + if (out_dir.empty()) + LOG(FATAL) << "The output directory is empty.\n"; + + char filename_buffer[1000]; + + for (size_t cid = 0; cid < cam_images.size(); cid++) { + double timestamp = cam_images[cid].timestamp; + int cam_type = cam_images[cid].camera_type; + + int num_exclude_boundary_pixels = 0; + if (cam_type == ref_camera_type) + num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels; + + // Must use the 10.7f format for the timestamp as everywhere else in the code + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s", + out_dir.c_str(), timestamp, cam_names[cam_type].c_str()); + std::string out_prefix = filename_buffer; // convert to string + + std::cout << "Creating texture for: " << out_prefix << std::endl; + meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type], + num_exclude_boundary_pixels, out_prefix); + } + } + // Rebuild the map. // TODO(oalexan1): This must be integrated in astrobee. void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data @@ -838,6 +965,18 @@ void calc_median_residuals(std::vector const& residuals, std::string rebuild_detector = "SURF"; std::cout << "Rebuilding map with " << rebuild_detector << " detector."; + // Set programatically the command line option for astrobee's map + // building min angle based on the corresponding refiner flag. 
+ std::ostringstream oss; + oss << FLAGS_refiner_min_angle; + std::string min_valid_angle = oss.str(); + google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); + if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) + LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " + << "map-building software.\n"; + // The newline below is due to the sparse map software not putting a newline + std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; + // Copy some data to make sure it does not get lost on resetting things below std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; @@ -896,8 +1035,10 @@ void calc_median_residuals(std::vector const& residuals, // TODO(oalexan1): Move this to utils. // Intersect ray with mesh. Return true on success. - bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, + bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, double min_ray_dist, double max_ray_dist, // Output @@ -934,6 +1075,196 @@ void calc_median_residuals(std::vector const& residuals, return false; } + // Compute the transforms from the world to every camera, using pose interpolation + // if necessary. 
+ void calc_world_to_cam_transforms( // Inputs + std::vector const& cams, std::vector const& world_to_ref_vec, + std::vector const& ref_timestamps, std::vector const& ref_to_cam_vec, + std::vector const& ref_to_cam_timestamp_offsets, + // Output + std::vector& world_to_cam) { + if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) + LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; + if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) + LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; + + world_to_cam.resize(cams.size()); + + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); + } + return; + } + + // Match features while assuming that the input cameras can be used to filter out + // outliers by reprojection error. 
+ void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, + camera::CameraParameters const& left_params, + camera::CameraParameters const& right_params, + Eigen::Affine3d const& left_world_to_cam, + Eigen::Affine3d const& right_world_to_cam, + double reprojection_error, + cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, + bool verbose, + // output + MATCH_PAIR* matches) { + // Match by using descriptors first + std::vector cv_matches; + interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); + + // Do filtering + std::vector left_vec; + std::vector right_vec; + std::vector filtered_cv_matches; + for (size_t j = 0; j < cv_matches.size(); j++) { + int left_ip_index = cv_matches.at(j).queryIdx; + int right_ip_index = cv_matches.at(j).trainIdx; + + Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1]); + + Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1]); + + Eigen::Vector2d undist_left_ip; + Eigen::Vector2d undist_right_ip; + left_params.Convert + (dist_left_ip, &undist_left_ip); + right_params.Convert + (dist_right_ip, &undist_right_ip); + + Eigen::Vector3d X = + dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), + left_world_to_cam, right_world_to_cam, + undist_left_ip, undist_right_ip); + + // Project back into the cameras + Eigen::Vector3d left_cam_X = left_world_to_cam * X; + Eigen::Vector2d undist_left_pix + = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); + Eigen::Vector2d dist_left_pix; + left_params.Convert(undist_left_pix, + &dist_left_pix); + + Eigen::Vector3d right_cam_X = right_world_to_cam * X; + Eigen::Vector2d undist_right_pix + = right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); + 
Eigen::Vector2d dist_right_pix; + right_params.Convert(undist_right_pix, + &dist_right_pix); + + // Filter out points whose reprojection error is too big + bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && + (dist_right_ip - dist_right_pix).norm() <= reprojection_error); + + // If any values above are Inf or NaN, is_good will be false as well + if (!is_good) continue; + + // Get the keypoints from the good matches + left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1])); + right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1])); + + filtered_cv_matches.push_back(cv_matches[j]); + } + + if (left_vec.empty()) return; + + // Filter using geometry constraints + // These may need some tweaking but works reasonably well. + double ransacReprojThreshold = 20.0; + cv::Mat inlier_mask; + int maxIters = 10000; + double confidence = 0.8; + + // affine2D works better than homography + // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); + + std::vector left_ip, right_ip; + for (size_t j = 0; j < filtered_cv_matches.size(); j++) { + int left_ip_index = filtered_cv_matches.at(j).queryIdx; + int right_ip_index = filtered_cv_matches.at(j).trainIdx; + + if (inlier_mask.at(j, 0) == 0) continue; + + cv::Mat left_desc = left_descriptors.row(left_ip_index); + cv::Mat right_desc = right_descriptors.row(right_ip_index); + + InterestPoint left; + left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); + + InterestPoint right; + right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); + + left_ip.push_back(left); + right_ip.push_back(right); + } + + // Update the shared variable using a lock + match_mutex->lock(); + 
+ // Print the verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; + + *matches = std::make_pair(left_ip, right_ip); + match_mutex->unlock(); + } + + // Find if a given feature is an inlier, and take care to check that + // the bookkeeping is correct. + int getMapValue(std::vector>> const& pid_cid_fid, + size_t pid, int cid, int fid) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; + + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; + + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; + + return fid_it->second; + } + + // Set a feature to be an outlier, and take care to check that + // the bookkeeping is correct. 
+ void setMapValue(std::vector>> & pid_cid_fid, + size_t pid, int cid, int fid, int val) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; + + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; + + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; + + fid_it->second = val; + } + } // namespace dense_map int main(int argc, char** argv) { @@ -960,6 +1291,33 @@ int main(int argc, char** argv) { if (FLAGS_timestamp_offsets_max_change < 0) LOG(FATAL) << "The timestamp offsets must be non-negative."; + if (FLAGS_refiner_min_angle <= 0.0) + LOG(FATAL) << "The min triangulation angle must be positive.\n"; + + if (FLAGS_depth_tri_weight < 0.0) + LOG(FATAL) << "The depth weight must non-negative.\n"; + + if (FLAGS_mesh_tri_weight < 0.0) + LOG(FATAL) << "The mesh weight must non-negative.\n"; + + if (FLAGS_depth_mesh_weight < 0.0) + LOG(FATAL) << "The depth mesh weight must non-negative.\n"; + + if (FLAGS_nav_cam_num_exclude_boundary_pixels < 0) + LOG(FATAL) << "Must have a non-negative value for --nav_cam_num_exclude_boundary_pixels.\n"; + + if (FLAGS_nav_cam_distortion_replacement != "") { + if (FLAGS_haz_cam_intrinsics_to_float != "" || + FLAGS_sci_cam_intrinsics_to_float != "" || + FLAGS_extrinsics_to_float != "" || + FLAGS_float_sparse_map || + FLAGS_float_scale || + FLAGS_float_timestamp_offsets) + LOG(FATAL) << "If replacing and optimizing the nav_cam model distortion, the rest " + "of the variables must be kept fixed. Once this model is found and saved, " + "a subsequent call to this tool may do various co-optimizations."; + } + // We assume our camera rig has n camera types. Each can be image or // depth + image. Just one camera must be the reference camera. In // this code it will be nav_cam. 
Every camera object (class @@ -997,6 +1355,7 @@ int main(int argc, char** argv) { // Outputs cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, haz_cam_depth_to_image_transform); + std::vector orig_cam_params = cam_params; if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { for (size_t it = 0; it < cam_names.size(); it++) { @@ -1009,16 +1368,6 @@ int main(int argc, char** argv) { } } - std::vector orig_cam_params = cam_params; - - for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { - std::cout << "Ref to cam offset for " << cam_names[it] << ' ' - << ref_to_cam_timestamp_offsets[it] << std::endl; - } - - if (FLAGS_mesh_weight <= 0.0) - LOG(FATAL) << "The mesh weight must be positive.\n"; - mve::TriangleMesh::Ptr mesh; std::shared_ptr mesh_info; std::shared_ptr graph; @@ -1045,6 +1394,23 @@ int main(int argc, char** argv) { boost::shared_ptr sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + // Deal with using a non-FOV model for nav_cam, if desired + Eigen::VectorXd nav_cam_distortion_replacement; + if (FLAGS_nav_cam_distortion_replacement != "") { + std::vector vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); + if (vec.size() != 4 && vec.size() != 5) + LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding" + << "to radial and tangential distortion coefficients.\n"; + nav_cam_distortion_replacement + = Eigen::Map(vec.data(), vec.size()); + } + + if (FLAGS_refiner_skip_filtering && + (cam_params[ref_cam_type].GetDistortion().size() > 1 || + nav_cam_distortion_replacement.size() != 0)) + LOG(FATAL) << "Must use outlier filtering if a non-fisheye lens distortion is used" + << "with nav_cam, as it is hard to to fit such a model at the image periphery.\n"; + // TODO(oalexan1): All this timestamp reading logic below should be in a function // Parse the ref timestamps from the sparse map @@ -1081,6 +1447,29 @@ int 
main(int argc, char** argv) { dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); + std::string depth_to_image_name = "depth_to_image"; + std::set extrinsics_to_float; + dense_map::parse_extrinsics_to_float(cam_names, depth_to_image_name, + FLAGS_extrinsics_to_float, extrinsics_to_float); + + if (FLAGS_float_scale && FLAGS_affine_depth_to_image) + LOG(FATAL) << "The options --float_scale and --affine_depth_to_image should not be used " + << "together. If the latter is used, the scale is always floated.\n"; + + if (!FLAGS_affine_depth_to_image && FLAGS_float_scale && + extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " + << "this is allowed as part of --extrinsics_to_float.\n"; + + if (FLAGS_nav_cam_distortion_replacement != "") { + if (intrinsics_to_float[ref_cam_type].find("distortion") + == intrinsics_to_float[ref_cam_type].end() || + intrinsics_to_float[ref_cam_type].size() != 1) { + LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " + << "distortion and no other nav cam intrinsics.\n"; + } + } + // Put in arrays, so we can optimize them std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); for (int cam_type = 0; cam_type < num_cam_types; cam_type++) @@ -1088,10 +1477,9 @@ int main(int argc, char** argv) { (ref_to_cam_trans[cam_type], &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - for (size_t it = 0; it < ref_to_cam_trans.size(); it++) { - std::cout << "Ref to cam transform for " << cam_names[it] << ":\n" - << ref_to_cam_trans[it].matrix() << std::endl; - } + // Set up the variable blocks to optimize for BracketedDepthError + int num_depth_params = dense_map::NUM_RIGID_PARAMS; + if (FLAGS_affine_depth_to_image) num_depth_params = 
dense_map::NUM_AFFINE_PARAMS; // Depth to image transforms and scales std::vector depth_to_image_noscale; @@ -1099,12 +1487,19 @@ int main(int argc, char** argv) { depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); depth_to_image_noscale.push_back(haz_cam_depth_to_image_noscale); depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); + // Put in arrays, so we can optimize them - std::vector depth_to_image_noscale_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::rigid_transform_to_array - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + std::vector depth_to_image_noscale_vec(num_cam_types * num_depth_params); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::affine_transform_to_array + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[num_depth_params * cam_type]); + else + dense_map::rigid_transform_to_array + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[num_depth_params * cam_type]); + } // Put the intrinsics in arrays std::vector focal_lengths(num_cam_types); @@ -1113,6 +1508,9 @@ int main(int argc, char** argv) { for (int it = 0; it < num_cam_types; it++) { focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths optical_centers[it] = cam_params[it].GetOpticalOffset(); + + if (cam_params[it].GetDistortion().size() == 0) + LOG(FATAL) << "The cameras are expected to have distortion."; distortions[it] = cam_params[it].GetDistortion(); } @@ -1150,8 +1548,6 @@ int main(int argc, char** argv) { // Populate the data for each camera image std::vector cams; for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { - std::cout.precision(18); - if (ref_cam_type != 0) LOG(FATAL) << "It is assumed that the ref cam type is 0."; @@ -1196,8 +1592,9 @@ int main(int argc, char** argv) { if (ref_it + 1 
>= num_ref_cams) break; // Arrived at the end, cannot do a bracket // Convert the bracketing timestamps to current cam's time - double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_timestamp_offsets[cam_type]; - double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_timestamp_offsets[cam_type]; + double ref_to_cam_offset = ref_to_cam_timestamp_offsets[cam_type]; + double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_offset; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_offset; if (right_timestamp <= left_timestamp) LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; @@ -1232,8 +1629,6 @@ int main(int argc, char** argv) { found_time)) break; // Need not succeed, but then there's no need to go on are we are at the end - std::cout.precision(18); - double curr_dist = std::abs(found_time - mid_timestamp); if (curr_dist < best_dist) { best_dist = curr_dist; @@ -1256,7 +1651,7 @@ int main(int argc, char** argv) { cam.camera_type = cam_type; cam.timestamp = best_time; - cam.ref_timestamp = best_time - ref_to_cam_timestamp_offsets[cam_type]; + cam.ref_timestamp = best_time - ref_to_cam_offset; cam.beg_ref_index = ref_it; cam.end_ref_index = ref_it + 1; cam.image = best_image; @@ -1264,25 +1659,6 @@ int main(int argc, char** argv) { upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); - // TODO(oalexan1): Wipe this temporary block - if (cam_type == 1) { - // Must compare raw big timestamps before and after! - // Must compare residuals! 
- - double nav_start = ref_timestamps[ref_it]; - double haz_time = cam.ref_timestamp; - double nav_end = ref_timestamps[ref_it + 1]; - // std::cout << "--xxxhaz after " << haz_time - nav_start << ' ' << nav_end - haz_time << - // ' ' << nav_end - nav_start << std::endl; - } - if (cam_type == 2) { - double nav_start = ref_timestamps[ref_it]; - double sci_time = cam.ref_timestamp; - double nav_end = ref_timestamps[ref_it + 1]; - // std::cout << "--xxxsci after " << sci_time - nav_start << ' ' << nav_end - sci_time << - // ' ' << nav_end - nav_start << std::endl; - } - // See if to skip this timestamp if (!cam_timestamps_to_use[cam_type].empty() && cam_timestamps_to_use[cam_type].find(cam.timestamp) == @@ -1294,8 +1670,13 @@ int main(int argc, char** argv) { if (!success) continue; + // This can be useful in checking if all the sci cams were bracketed successfully. + // std::cout << std::setprecision(17) << std::fixed << "For camera " + // << cam_names[cam_type] << " pick timestamp " + // << cam.timestamp << ".\n"; + if (depth_topics[cam_type] != "") { - double found_time = -1.0; + cam.cloud_timestamp = -1.0; // will change cv::Mat cloud; // Look up the closest cloud in time (either before or after cam.timestamp) // This need not succeed. @@ -1304,14 +1685,14 @@ int main(int argc, char** argv) { // Outputs cam.depth_cloud, cloud_start_positions[cam_type], // care here - found_time); + cam.cloud_timestamp); } cams.push_back(cam); } // end loop over camera types } // end loop over ref images - std::cout << "Allowed timestamp offset range while respecting the bracket for given cameras:\n"; + std::cout << "Allowed timestamp offset range while respecting the brackets for given cameras:\n"; for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { if (cam_type == ref_cam_type) continue; // bounds don't make sense here @@ -1348,42 +1729,58 @@ int main(int argc, char** argv) { } } - std::cout << "--Deal with adjustment!" 
<< std::endl; - for (size_t it = 0; it < cams.size(); it++) { - if (cams[it].camera_type == 2) { - dense_map::adjustImageSize(cam_params[2], cams[it].image); - } - } + // The images from the bag may need to be resized to be the same + // size as in the calibration file. Sometimes the full-res images + // can be so blurry that interest point matching fails, hence the + // resizing. + for (size_t it = 0; it < cam_params.size(); it++) + dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); + // Sort by the timestamp in reference camera time. This is essential + // for matching each image to other images close in time. std::sort(cams.begin(), cams.end(), dense_map::timestampLess); if (FLAGS_verbose) { - int count = 10000; - std::vector image_files; - for (size_t it = 0; it < cams.size(); it++) { - std::ostringstream oss; - oss << count << "_" << cams[it].camera_type << ".jpg"; - std::string name = oss.str(); - std::cout << "--writing " << name << std::endl; - cv::imwrite(name, cams[it].image); - count++; - image_files.push_back(name); - } + int count = 10000; + std::vector image_files; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cams[it].camera_type << ".jpg"; + std::string name = oss.str(); + std::cout << "Writing: " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } } + // Detect features using multiple threads. Too many threads may result + // in high memory usage. + int num_threads_val = std::min(8u, std::thread::hardware_concurrency()); + std::ostringstream oss; + oss << num_threads_val; + std::string num_threads = oss.str(); + google::SetCommandLineOption("num_threads", num_threads.c_str()); + if (!gflags::GetCommandLineOption("num_threads", &num_threads)) + LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; + std::cout << "Using " << num_threads << " threads for feature detection/matching." 
<< std::endl; + std::cout << "Note that this step uses a huge amount of memory." << std::endl; + std::cout << "Detecting features." << std::endl; - // Detect features using multiple threads std::vector cid_to_descriptor_map; std::vector cid_to_keypoint_map; cid_to_descriptor_map.resize(cams.size()); cid_to_keypoint_map.resize(cams.size()); - ff_common::ThreadPool thread_pool1; - for (size_t it = 0; it < cams.size(); it++) { - thread_pool1.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, - &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + { + // Make the threadpool go out of scope when not needed to not use up memory + ff_common::ThreadPool thread_pool; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, + &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + } + thread_pool.Join(); } - thread_pool1.Join(); dense_map::MATCH_MAP matches; @@ -1394,30 +1791,54 @@ int main(int argc, char** argv) { } } - std::cout << "Matching features." << std::endl; - ff_common::ThreadPool thread_pool2; - std::mutex match_mutex; - for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { - auto pair = image_pairs[pair_it]; - int left_image_it = pair.first, right_image_it = pair.second; - bool verbose = true; - thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, - left_image_it, right_image_it, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], - cid_to_keypoint_map[right_image_it], - verbose, &matches[pair]); + { + // The transform from the world to every camera + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + std::cout << "Matching features." 
<< std::endl; + ff_common::ThreadPool thread_pool; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + if (FLAGS_refiner_skip_filtering) { + thread_pool.AddTask(&dense_map::matchFeatures, + &match_mutex, + left_image_it, right_image_it, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + FLAGS_verbose, &matches[pair]); + } else { + thread_pool.AddTask(&dense_map::matchFeaturesWithCams, + &match_mutex, + left_image_it, right_image_it, + cam_params[cams[left_image_it].camera_type], + cam_params[cams[right_image_it].camera_type], + world_to_cam[left_image_it], + world_to_cam[right_image_it], + FLAGS_initial_max_reprojection_error, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + FLAGS_verbose, &matches[pair]); + } + } + thread_pool.Join(); } - thread_pool2.Join(); + cid_to_descriptor_map = std::vector(); // Wipe, takes memory // If feature A in image I matches feather B in image J, which matches feature C in image K, // then (A, B, C) belong together into a track. Build such a track. 
std::vector, int>> keypoint_map(cams.size()); - int num_total_matches = 0; // temporary - // Give all interest points in a given image a unique id for (auto it = matches.begin(); it != matches.end(); it++) { std::pair const& index_pair = it->first; // alias @@ -1433,15 +1854,9 @@ int main(int argc, char** argv) { auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); keypoint_map[left_index][dist_left_ip] = 0; keypoint_map[right_index][dist_right_ip] = 0; - num_total_matches++; } } - std::cout << "--bbb num total matches " << num_total_matches << std::endl; - std::cout << "--why so many more matches than pid?" << std::endl; - std::cout << "--test adding missing pairs!" << std::endl; - std::cout << "--must do two passes!" << std::endl; - // Give all interest points in a given image a unique id // And put them in a vector with the id corresponding to the interest point std::vector>> keypoint_vec(cams.size()); @@ -1450,15 +1865,10 @@ int main(int argc, char** argv) { for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { ip_it->second = count; count++; - // std::cout << "--value " << (ip_it->first).first << ' ' << (ip_it->first).second << ' ' - // << ip_it->second << std::endl; keypoint_vec[cam_it].push_back(ip_it->first); - // std::cout << "--size is " << keypoint_vec[cam_it].size() << std::endl; } } - std::cout << "--write my own function! It should just remove conflicts!" 
<< std::endl; - openMVG::matching::PairWiseMatches match_map; for (auto it = matches.begin(); it != matches.end(); it++) { std::pair const& index_pair = it->first; // alias @@ -1476,10 +1886,6 @@ int main(int argc, char** argv) { auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - // std::cout << "zzz1 " << left_index << ' ' << dist_left_ip.first << ' ' << - // dist_left_ip.second << ' ' << right_index << ' ' << dist_right_ip.first << ' ' << - // dist_right_ip.second << std::endl; - int left_id = keypoint_map[left_index][dist_left_ip]; int right_id = keypoint_map[right_index][dist_right_ip]; mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); @@ -1495,8 +1901,6 @@ int main(int argc, char** argv) { int left_index = index_pair.first; int right_index = index_pair.second; - std::cout << "--indices " << left_index << ' ' << right_index << std::endl; - std::ostringstream oss1; oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; @@ -1511,18 +1915,18 @@ int main(int argc, char** argv) { std::string match_file = left_stem + "__" + right_stem + ".match"; - std::cout << "Writing: " << left_image << ' ' << right_image << ' ' << match_file << std::endl; + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); } } - std::cout << "Find other things which need deallocation!" << std::endl; - std::cout << "De-allocate images and point clouds when no longer needed!" << std::endl; + // TODO(oalexan1): De-allocate all data when no longer need as + // the matching logic takes a lot of time. 
// not needed anymore and take up a lot of RAM matches.clear(); matches = dense_map::MATCH_MAP(); keypoint_map.clear(); keypoint_map.shrink_to_fit(); - cid_to_descriptor_map.clear(); cid_to_descriptor_map.shrink_to_fit(); cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); std::vector > pid_to_cid_fid; @@ -1537,9 +1941,8 @@ int main(int argc, char** argv) { // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} openMVG::tracks::STLMAPTracks map_tracks; trackBuilder.ExportToSTL(map_tracks); - - // TODO(oalexan1): Print how many pairwise matches were there before - // and after filtering tracks. + match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed + trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it if (map_tracks.empty()) LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; @@ -1557,84 +1960,6 @@ int main(int argc, char** argv) { } } - // The transform from every camera to the world - std::vector world_to_cam(cams.size()); - for (size_t it = 0; it < cams.size(); it++) { - int beg_index = cams[it].beg_ref_index; - int end_index = cams[it].end_ref_index; - int cam_type = cams[it].camera_type; - // std::cout << "--ref indices " << beg_index << ' ' << end_index << std::endl; - // std::cout << "--cam type " << cam_type << std::endl; - world_to_cam[it] = dense_map::calc_world_to_cam_trans - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - ref_timestamps[beg_index], ref_timestamps[end_index], - ref_to_cam_timestamp_offsets[cam_type], - cams[it].timestamp); - - // std::cout << "--trans for camera: " << it << ' ' << world_to_cam[it].matrix() << std::endl; - } - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - // std::cout << std::endl; - // std::cout << "pid is " << pid << std::endl; - // std::cout << "---aaa pid size 
is " << pid_to_cid_fid[pid].size() << std::endl; - // std::cout << "zzz2 "; - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - // std::cout << cid << ' ' << keypoint_vec[cid][fid].first << ' '; - // std::cout << keypoint_vec[cid][fid].second << " "; - } - // std::cout << std::endl; - } - - // Do multiview triangulation - std::vector xyz_vec(pid_to_cid_fid.size()); - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - Eigen::Vector3d mesh_xyz(0, 0, 0); - int num = 0; - - std::vector focal_length_vec; - std::vector world_to_cam_vec; - std::vector pix_vec; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); - world_to_cam_vec.push_back(world_to_cam[cid]); - pix_vec.push_back(undist_ip); - } - - // Triangulate n rays emanating from given undistorted and centered pixels - Eigen::Vector3d joint_xyz = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); - - xyz_vec[pid] = joint_xyz; - - // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; - } - - std::cout << "--must do two passes!" << std::endl; - std::cout << "--must filter by min triangulation angle and points behind camera" << std::endl; - - // std::vector> cid_fid_to_pid; - // sparse_mapping::InitializeCidFidToPid(cams.size(), pid_to_cid_fid, &cid_fid_to_pid); - - std::cout << "---too many outliers!" << std::endl; - - std::cout << "--start selecting!" 
<< std::endl; - // Set up the variable blocks to optimize for BracketedCamError std::vector bracketed_cam_block_sizes; int num_focal_lengths = 1; @@ -1646,8 +1971,7 @@ int main(int argc, char** argv) { bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); bracketed_cam_block_sizes.push_back(num_focal_lengths); bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - std::cout << "--make bracketed block sizes individual!" << std::endl; - bracketed_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + bracketed_cam_block_sizes.push_back(num_distortion_params); // will be modified later // Set up the variable blocks to optimize for RefCamError std::vector ref_cam_block_sizes; @@ -1655,23 +1979,31 @@ int main(int argc, char** argv) { ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); ref_cam_block_sizes.push_back(num_focal_lengths); ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - std::cout << "--make ref block sizes individual!" 
<< std::endl; - ref_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + ref_cam_block_sizes.push_back(num_distortion_params); // will be modified later - // Set up the variable blocks to optimize for BracketedDepthError + // Set up variable blocks to optimize for BracketedDepthError std::vector bracketed_depth_block_sizes; bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(num_depth_params); bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + // Set up the variable blocks to optimize for BracketedDepthMeshError + std::vector bracketed_depth_mesh_block_sizes; + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(num_depth_params); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + // Set up the variable blocks to optimize for RefDepthError std::vector ref_depth_block_sizes; ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(num_depth_params); ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); @@ -1679,356 +2011,665 @@ int main(int argc, char** argv) { std::vector mesh_block_sizes; 
mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - std::cout << "must test with the ref cam having depth!" << std::endl; + // For a given fid = pid_to_cid_fid[pid][cid], the value + // pid_cid_fid_inlier[pid][cid][fid] will be non-zero only if this + // pixel is an inlier. Originally all pixels are inliers. Once an + // inlier becomes an outlier, it never becomes an inlier again. + std::vector>> pid_cid_fid_inlier; + if (!FLAGS_refiner_skip_filtering) { + pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + pid_cid_fid_inlier[pid][cid][fid] = 1; + } + } + } + + for (int pass = 0; pass < FLAGS_refiner_num_passes; pass++) { + std::cout << "\nOptimization pass " << pass + 1 << " / " << FLAGS_refiner_num_passes << "\n"; - // Form the problem - ceres::Problem problem; - std::vector residual_names; - std::vector residual_scales; - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; + // The transforms from the world to all cameras must be updated + // given the current state of optimization + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); - int cam_type = cams[cid].camera_type; - int beg_ref_index = cams[cid].beg_ref_index; - int end_ref_index = cams[cid].end_ref_index; + // Do multiview triangulation + std::vector xyz_vec(pid_to_cid_fid.size()); - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + std::vector focal_length_vec; + std::vector 
world_to_cam_vec; + std::vector pix_vec; - if (cam_type == ref_cam_type) { - // The cost function of projecting in the ref cam. - ceres::CostFunction* ref_cost_function = - dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* ref_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // std::cout << "--add block" << std::endl; - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (ref_cost_function, ref_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &xyz_vec[pid][0], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - } + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; - Eigen::Vector3d depth_xyz(0, 0, 0); - if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; - - // The constraint that the depth points agree with triangulated points - ceres::CostFunction* ref_depth_cost_function - = dense_map::RefDepthError::Create(FLAGS_depth_weight, - depth_xyz, - ref_depth_block_sizes); - - ceres::LossFunction* ref_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); - residual_scales.push_back(FLAGS_depth_weight); - 
residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - problem.AddResidualBlock - (ref_depth_cost_function, - ref_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0]); - - if (!FLAGS_float_sparse_map) - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - if (!FLAGS_float_scale) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + // Triangulate inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - } else { - // Other cameras, which need bracketing - ceres::CostFunction* bracketed_cost_function = - dense_map::BracketedCamError::Create(dist_ip, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* bracketed_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // std::cout << "--add block" << std::endl; - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (bracketed_cost_function, bracketed_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - 
(&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector3d depth_xyz(0, 0, 0); - if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; - - // Ensure that the depth points agree with triangulated points - ceres::CostFunction* bracketed_depth_cost_function - = dense_map::BracketedDepthError::Create(FLAGS_depth_weight, - depth_xyz, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_depth_block_sizes); - - ceres::LossFunction* bracketed_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - problem.AddResidualBlock - (bracketed_depth_cost_function, - bracketed_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type]); - - if 
(!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + if (cams[cid].camera_type == ref_cam_type) { + // Flag as outliers pixels at the nav_cam boundary, if desired. This + // is especially important when the nav_cam uses the radtan + // model instead of fisheye. + Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); + int excl = FLAGS_nav_cam_num_exclude_boundary_pixels; + if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || + dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + continue; + } } - if (!FLAGS_float_scale) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); + + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } + + if (!FLAGS_refiner_skip_filtering && pix_vec.size() < 2) { + // If after outlier filtering less than two rays are left, can't triangulate. + // Must set all features for this pid to outliers. 
+ for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); } + + // Nothing else to do + continue; } + + // Triangulate n rays emanating from given undistorted and centered pixels + xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); } - } - if (FLAGS_mesh != "") { - // Add the mesh constraint + if (pass == 0 && nav_cam_distortion_replacement.size() > 1) { + // At the first pass, right after triangulation is done with a + // given nav cam model, which presumably was pretty accurate, + // replace its distortion if desired, which we will then + // optimize. + std::cout << "Setting nav cam distortion to: " << nav_cam_distortion_replacement.transpose() + << ". Will proceed to optimize it.\n"; + cam_params[ref_cam_type].SetDistortion(nav_cam_distortion_replacement); + distortions[ref_cam_type] = cam_params[ref_cam_type].GetDistortion(); + } + // For a given fid = pid_to_cid_fid[pid][cid], + // the value pid_cid_fid_to_residual[pid][cid][fid] will be the index in the array + // of residuals (look only at pixel residuals). This structure is populated only for + // inliers, so its size changes at each pass. 
+ std::vector>> pid_cid_fid_to_residual; + if (!FLAGS_refiner_skip_filtering) pid_cid_fid_to_residual.resize(pid_to_cid_fid.size()); + + // Form the problem + ceres::Problem problem; + std::vector residual_names; + std::vector residual_scales; for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - Eigen::Vector3d mesh_xyz(0, 0, 0); - int num = 0; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { int cid = cid_fid->first; int fid = cid_fid->second; + // Deal with inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; + + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - bool have_mesh_intersection = false; - // Try to make the intersection point be on the mesh and the nav cam ray - // to make the sci cam to conform to that. - // TODO(oalexan1): Think more of the range of the ray below - double min_ray_dist = 0.0; - double max_ray_dist = 10.0; - Eigen::Vector3d intersection(0.0, 0.0, 0.0); - have_mesh_intersection - = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], - world_to_cam[cid], mesh, bvh_tree, - min_ray_dist, max_ray_dist, - // Output - intersection); - - if (have_mesh_intersection) { - mesh_xyz += intersection; - num += 1; + if (cam_type == ref_cam_type) { + // The cost function of projecting in the ref cam. 
+ ceres::CostFunction* ref_cost_function = + dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* ref_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to create + if (!FLAGS_refiner_skip_filtering) + pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (ref_cost_function, ref_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &xyz_vec[pid][0], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (FLAGS_depth_tri_weight == 0 || + !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // The constraint that the depth points agree with triangulated points + ceres::CostFunction* ref_depth_cost_function + = dense_map::RefDepthError::Create(FLAGS_depth_tri_weight, + depth_xyz, + ref_depth_block_sizes); + + ceres::LossFunction* ref_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_tri_x_m"); + residual_names.push_back("depth_tri_y_m"); + residual_names.push_back("depth_tri_z_m"); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + problem.AddResidualBlock + (ref_depth_cost_function, + ref_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + 
&depth_to_image_noscale_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0]); + + if (!FLAGS_float_sparse_map) + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&depth_to_image_noscale_vec[num_depth_params * cam_type]); + + } else { + // Other cameras, which need bracketing + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to create + if (!FLAGS_refiner_skip_filtering) + pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if 
(!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (FLAGS_depth_tri_weight == 0 || + !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // Ensure that the depth points agree with triangulated points + ceres::CostFunction* bracketed_depth_cost_function + = dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, + depth_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_block_sizes); + + ceres::LossFunction* bracketed_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_tri_x_m"); + residual_names.push_back("depth_tri_y_m"); + residual_names.push_back("depth_tri_z_m"); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + problem.AddResidualBlock + (bracketed_depth_cost_function, + bracketed_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &depth_to_image_noscale_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type]); + + // TODO(oalexan1): This code repeats too much. Need to keep a hash + // of sparse map pointers. 
+ if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&depth_to_image_noscale_vec[num_depth_params * cam_type]); } } + } - if (num >= 1) { - mesh_xyz /= num; + if (FLAGS_mesh != "") { + // Add the mesh constraint + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num_intersections = 0; // Number of mesh intersections given the rays for this pid + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + // Intersect the ray with the mesh + bool have_mesh_intersection = false; + Eigen::Vector3d mesh_intersect(0.0, 0.0, 0.0); + 
have_mesh_intersection + = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + FLAGS_min_ray_dist, FLAGS_max_ray_dist, + // Output + mesh_intersect); + + if (have_mesh_intersection) { + mesh_xyz += mesh_intersect; + num_intersections += 1; + } - // std::cout << "--num and mesh xyz is " << num << ' ' << mesh_xyz.transpose() << std::endl; + if (FLAGS_depth_mesh_weight > 0 && have_mesh_intersection) { + // Try to make each mesh intersection agree with corresponding depth measurement, + // if it exists + Eigen::Vector3d depth_xyz(0, 0, 0); + if (dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) { + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + if (cam_type != ref_cam_type) { + // TODO(oalexan1): What to do when cam_type == ref_cam_type? + // Ensure that the depth points agree with mesh points + ceres::CostFunction* bracketed_depth_mesh_cost_function + = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, + depth_xyz, + mesh_intersect, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_mesh_block_sizes); + + ceres::LossFunction* bracketed_depth_mesh_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_mesh_x_m"); + residual_names.push_back("depth_mesh_y_m"); + residual_names.push_back("depth_mesh_z_m"); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + problem.AddResidualBlock + (bracketed_depth_mesh_cost_function, + bracketed_depth_mesh_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + 
&depth_to_image_noscale_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &ref_to_cam_timestamp_offsets[cam_type]); + + // TODO(oalexan1): This code repeats too much. Need to keep a hash + // of sparse map pointers. + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&depth_to_image_noscale_vec[num_depth_params * cam_type]); + } + } + } + } - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_weight); + if (num_intersections >= 1 && FLAGS_mesh_tri_weight > 0) { + // Average the intersections of all rays with the mesh, and try to make + // the triangulated point agree with the mesh intersection - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + mesh_xyz /= num_intersections; - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &xyz_vec[pid][0]); + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, 
FLAGS_mesh_tri_weight); - residual_names.push_back("mesh_x_m"); - residual_names.push_back("mesh_y_m"); - residual_names.push_back("mesh_z_m"); + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - residual_scales.push_back(FLAGS_mesh_weight); - residual_scales.push_back(FLAGS_mesh_weight); - residual_scales.push_back(FLAGS_mesh_weight); + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); + + residual_names.push_back("mesh_tri_x_m"); + residual_names.push_back("mesh_tri_y_m"); + residual_names.push_back("mesh_tri_z_m"); + + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + } } } - // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; - } - // See which intrinsics from which cam to float or keep fixed - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (intrinsics_to_float[cam_type].find("focal_length") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; - problem.SetParameterBlockConstant(&focal_lengths[cam_type]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; + // See which intrinsics from which cam to float or keep fixed + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (intrinsics_to_float[cam_type].find("focal_length") == + intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating focal_length." 
<< std::endl; + } + if (intrinsics_to_float[cam_type].find("optical_center") == + intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << + // std::endl; + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; + } + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating distortion." << std::endl; + } } - if (intrinsics_to_float[cam_type].find("optical_center") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << std::endl; - problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating optical_center." 
<< std::endl; + + // Evaluate the residuals before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "before opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "Initial res " << residual_names[it] << " " << residuals[it] << std::endl; } - if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&distortions[cam_type][0]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating distortion." << std::endl; + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // The optimization is done. Right away copy the optimized states to where they belong to + // keep all data in sync. 
+ + // Copy back the reference transforms + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::array_to_rigid_transform(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Copy back the optimized intrinsics + for (int it = 0; it < num_cam_types; it++) { + cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); + cam_params[it].SetOpticalOffset(optical_centers[it]); + cam_params[it].SetDistortion(distortions[it]); } - } - // Evaluate the residual before optimization - double total_cost = 0.0; - std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } + // If the nav cam did not get optimized, go back to the solution + // with two focal lengths, rather than the one with one focal length + // solved by this solver (as the average of the two). The two focal + // lengths are very similar, but it is not worth modifying the + // camera model we don't plan to optimize. 
+ if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) + cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; + + // Copy back the optimized extrinsics + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy back the depth to image transforms without scales + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::array_to_affine_transform + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[num_depth_params * cam_type]); + else + dense_map::array_to_rigid_transform( + depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[num_depth_params * cam_type]); + } - // Copy the offsets to specific cameras - std::cout.precision(18); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - std::cout << "--offset before " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; - - // Solve the problem - ceres::Solver::Options options; - ceres::Solver::Summary summary; - options.linear_solver_type = ceres::ITERATIVE_SCHUR; - options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread - options.max_num_iterations = FLAGS_num_iterations; - options.minimizer_progress_to_stdout = true; - options.gradient_tolerance = 1e-16; - options.function_tolerance = 1e-16; - options.parameter_tolerance = FLAGS_parameter_tolerance; - ceres::Solve(options, &problem, &summary); - - // Evaluate the residual after optimization - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many 
residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); - if (FLAGS_verbose) { + // Evaluate the residuals after optimization + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + if (FLAGS_verbose) { for (size_t it = 0; it < residuals.size(); it++) - std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; - } + std::cout << "Final res " << residual_names[it] << " " << residuals[it] << std::endl; + } - // Copy back the reference transforms - std::cout.precision(18); - std::cout << "--sparse map before\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + // Flag outliers + if (!FLAGS_refiner_skip_filtering) { + // Before computing outliers by triangulation angle must recompute all the cameras, + // given the optimized parameters + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::array_to_rigid_transform(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + // Must deal with outliers by triangulation angle before + // removing outliers by reprojection error, as the latter will + // exclude some rays which 
form the given triangulated points. + int num_outliers_by_angle = 0, num_total_features = 0; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // Find the largest angle among any two intersecting rays + double max_rays_angle = 0.0; - std::cout << "--sparse map after\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; - // Copy back the optimized intrinsics - for (int it = 0; it < num_cam_types; it++) { - cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); - cam_params[it].SetOpticalOffset(optical_centers[it]); - cam_params[it].SetDistortion(distortions[it]); - } + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; - // The nav cam did not get optimized. Go back to the solution with - // two focal lengths, rather than the one with one focal length - // solved by this solver (as the average of the two). The two focal - // lengths are very similar, but it is not worth modifying the - // camera model we don't plan to optimize. 
- if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) - cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; + num_total_features++; - // Copy back the optimized extrinsics - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::array_to_rigid_transform - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; + ray1.normalize(); - // Copy back the depth to image transforms without scales - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::array_to_rigid_transform - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; + + // Look at each cid and next cids + if (cid2 <= cid1) + continue; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) continue; + + Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; + ray2.normalize(); + + double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); + + if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; + + max_rays_angle = std::max(max_rays_angle, curr_angle); + } + } + + if (max_rays_angle >= FLAGS_refiner_min_angle) + continue; // This is a good triangulated point, with large angle of convergence + + // Flag as outliers all the features for this cid + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + 
num_outliers_by_angle++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } + std::cout << std::setprecision(4) << "Removed " << num_outliers_by_angle + << " outlier features with small angle of convergence, out of " + << num_total_features << " (" + << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; + + int num_outliers_reproj = 0; + num_total_features = 0; // reusing this variable + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + num_total_features++; + + // Find the pixel residuals + size_t residual_index = dense_map::getMapValue(pid_cid_fid_to_residual, pid, cid, fid); + if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; + + double res_x = residuals[residual_index + 0]; + double res_y = residuals[residual_index + 1]; + // NaN values will never be inliers if the comparison is set as below + bool is_good = (Eigen::Vector2d(res_x, res_y).norm() <= FLAGS_max_reprojection_error); + if (!is_good) { + num_outliers_reproj++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } + } + std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj + << " outlier features using reprojection error, out of " << num_total_features + << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; + } + } // Update the optimized depth to image (for haz cam only) int cam_type = 1; // haz cam @@ -2041,7 +2682,8 @@ int main(int argc, char** argv) { ref_to_cam_timestamp_offsets, haz_cam_depth_to_image_transform); - if (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "") { + if (FLAGS_num_iterations > 0 && + (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "")) { std::cout 
<< "Either the sparse map intrinsics or cameras got modified. " << "The map must be rebuilt." << std::endl; @@ -2052,6 +2694,20 @@ int main(int argc, char** argv) { std::cout << "Writing: " << FLAGS_output_map << std::endl; sparse_map->Save(FLAGS_output_map); + if (FLAGS_out_texture_dir != "") { + if (FLAGS_mesh == "") + LOG(FATAL) << "Cannot project camera images onto a mesh if a mesh was not provided.\n"; + + // The transform from the world to every camera + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, ref_cam_type, + FLAGS_nav_cam_num_exclude_boundary_pixels, FLAGS_out_texture_dir); + } + return 0; } // NOLINT diff --git a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py index 06881ffd..387483e1 100644 --- a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py +++ b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py @@ -93,6 +93,8 @@ # run into old files index_file = os.path.join(args.camera_dir, args.camera_type + "_index.txt") +camera_files = [] + with open(index_file, "r") as f: for image_file in f: image_file = image_file.rstrip() @@ -103,6 +105,7 @@ print("Expecting a .jpg file, but got: " + image_file) in_cam = os.path.join(args.camera_dir, m.group(1) + suffix) + camera_files.append(in_cam) out_cam = args.undistorted_image_dir + "/" + os.path.basename(in_cam) @@ -120,13 +123,13 @@ M = np.linalg.inv(M) # world to camera print("Writing: " + out_cam) - with open(out_cam, "w") as f: + with open(out_cam, "w") as g: # translation - f.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) + g.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) # rotation - f.write( + g.write( "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % ( M[0][0], @@ -142,7 
+145,15 @@ ) # normaized inrinsics - f.write( + g.write( "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % (nf, d0, d1, paspect, ncx, ncy) ) + +# Save the name of the camera transforms. This will be used later +# for individual texturing of each image and camera. +camera_list = os.path.join(args.camera_dir, args.camera_type + "_transforms.txt") +with open(camera_list, "w") as f: + print("Writing: " + camera_list) + for camera in camera_files: + f.write(camera + "\n") diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index bff83728..7614f4d8 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -240,15 +240,14 @@ bool findInterpolatedPose(double desired_nav_cam_time, return false; camera::CameraModel end_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); - if (!sparse_map->Localize(end_image, &end_localized_cam, NULL, NULL, nearby_cid_ptr)) return false; + if (!sparse_map->Localize(end_image, &end_localized_cam, NULL, NULL, nearby_cid_ptr)) + return false; Eigen::Affine3d end_trans = end_localized_cam.GetTransform(); - for (size_t it = 0; it < nearby_cid.size(); it++) { - // std::cout << "nearby image: " << sparse_map->cid_to_filename_[nearby_cid[it]] - // << std::endl; - } // std::cout << "localizing cloud at time " << desired_nav_cam_time << std::endl; - double alpha = (desired_nav_cam_time - beg_nav_cam_time) / (end_nav_cam_time - beg_nav_cam_time); + double alpha = (desired_nav_cam_time - beg_nav_cam_time) + / (end_nav_cam_time - beg_nav_cam_time); + if (end_nav_cam_time == beg_nav_cam_time) alpha = 0.0; // handle division by zero Eigen::Affine3d interp_trans = dense_map::linearInterp(alpha, beg_trans, end_trans); diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 3d9b62b2..7166adb8 100644 --- 
a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -83,8 +83,8 @@ def process_args(args): default="sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160", help="The central region to keep after undistorting an image and " + "before texturing. For sci_cam the numbers are at 1/4th of the full " - + "resolution and will be adjusted for the actual input image dimensions. " - + "Use a list in quotes.", + + "resolution (resolution of calibration) and will be adjusted for the " + + "actual input image dimensions. Use a list in quotes.", ) parser.add_argument( "--haz_cam_points_topic", @@ -312,6 +312,15 @@ def process_args(args): action="store_true", help="Save many intermediate datasets for debugging.", ) + + parser.add_argument( + "--texture_individual_images", + dest="texture_individual_images", + action="store_true", + help="If specified, in addition to a joint texture of all images create individual " + + "textures for each image and camera. Does not work with simulated cameras. For debugging.", + ) + args = parser.parse_args() # Parse the crop windows @@ -681,6 +690,30 @@ def create_texrecon_cameras(args, src_path, undist_dir, cam_type): run_cmd(cmd, log_file, verbose=args.verbose) +def texture_individual_images(args, exec_path, mesh, dist_image_list, cam_type): + + camera_list = os.path.join(args.output_dir, cam_type + "_transforms.txt") + orthoproject_path = os.path.join(exec_path, "orthoproject") + + cmd = [ + orthoproject_path, + "-mesh", + mesh, + "-image_list", + dist_image_list, + "-camera_list", + camera_list, + "--camera_name", + cam_type, + "-output_prefix", + args.output_dir + "/" + cam_type, + ] + + log_file = os.path.join(args.output_dir, "orthoproject_log.txt") + print("Projecting individual images. 
Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) + + def run_texrecon(args, src_path, mesh, undist_dir, cam_type): # That is one long path @@ -693,7 +726,7 @@ def run_texrecon(args, src_path, mesh, undist_dir, cam_type): if not os.path.exists(texrecon_path): raise Exception("Cannot find: " + texrecon_path) - texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/run") + texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/" + cam_type) parent_dir = os.path.dirname(texrecon_dir) if os.path.isdir(parent_dir): # Wipe the existing directory @@ -743,7 +776,7 @@ def find_sci_cam_scale(image_file): return float(image_width) / float(config_width) -def texture_mesh(src_path, cam_type, crop_win_map, mesh, args): +def texture_mesh(src_path, exec_path, cam_type, crop_win_map, mesh, args): if args.simulated_data and cam_type == "nav_cam": print("Texturing nav_cam is not supported with simulated data.") return "None" @@ -778,6 +811,10 @@ def texture_mesh(src_path, cam_type, crop_win_map, mesh, args): args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale ) create_texrecon_cameras(args, src_path, undist_dir, cam_type) + + if args.texture_individual_images: + texture_individual_images(args, exec_path, mesh, dist_image_list, cam_type) + textured_mesh = run_texrecon(args, src_path, mesh, undist_dir, cam_type) else: # Simulated images don't have distortion @@ -936,7 +973,7 @@ def merge_poses_and_clouds(args): if start_step <= 9: for camera_type in args.camera_types.split(): textured_mesh = texture_mesh( - src_path, camera_type, crop_win_map, simplified_mesh, args + src_path, exec_path, camera_type, crop_win_map, simplified_mesh, args ) textured_meshes += [textured_mesh] diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index 6e010ed0..6026d5a6 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ 
b/dense_map/geometry_mapper/tools/image_picker.cc @@ -73,7 +73,7 @@ DEFINE_double(max_time_between_images, 1.0e+10, "Select additional nav cam images to make the timestamps between any two " "consecutive such images be no more than this."); -DEFINE_double(bracket_len, 2.0, +DEFINE_double(bracket_len, 0.6, "Lookup sci and haz cam images only between consecutive nav cam images " "whose distance in time is no more than this (in seconds). It is assumed " "the robot moves slowly and uniformly during this time."); @@ -137,14 +137,17 @@ int main(int argc, char** argv) { double nav_cam_to_haz_cam_timestamp_offset = nav_to_cam_timestamp_offset[1]; double nav_cam_to_sci_cam_timestamp_offset = nav_to_cam_timestamp_offset[2]; - std::cout << "nav_cam_to_haz_cam_timestamp_offset = " << nav_cam_to_haz_cam_timestamp_offset << "\n"; - std::cout << "nav_cam_to_sci_cam_timestamp_offset = " << nav_cam_to_sci_cam_timestamp_offset << "\n"; + std::cout << "nav_cam_to_haz_cam_timestamp_offset = " << nav_cam_to_haz_cam_timestamp_offset + << "\n"; + std::cout << "nav_cam_to_sci_cam_timestamp_offset = " << nav_cam_to_sci_cam_timestamp_offset + << "\n"; // TODO(oalexan1): Make the logic below into functions // Read the haz cam data we will need double haz_cam_start_time = -1.0; - std::vector const& haz_cam_intensity_msgs = haz_cam_intensity_handle.bag_msgs; + std::vector const& haz_cam_intensity_msgs + = haz_cam_intensity_handle.bag_msgs; for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { sensor_msgs::Image::ConstPtr image_msg = haz_cam_intensity_msgs[it].instantiate(); @@ -166,6 +169,7 @@ int main(int argc, char** argv) { // Read the sci cam data we will need (images and clouds) std::vector const& sci_cam_msgs = sci_cam_handle.bag_msgs; std::cout << "Number of sci cam images " << sci_cam_msgs.size() << std::endl; + std::set sci_cam_set; for (size_t it = 0; it < sci_cam_msgs.size(); it++) { sensor_msgs::Image::ConstPtr image_msg = sci_cam_msgs[it].instantiate(); if (image_msg) { 
@@ -178,6 +182,7 @@ int main(int argc, char** argv) { if (comp_image_msg) { double sci_cam_time = comp_image_msg->header.stamp.toSec(); all_sci_cam_timestamps.push_back(sci_cam_time); + sci_cam_set.insert(sci_cam_time); } } if (all_sci_cam_timestamps.empty()) LOG(FATAL) << "No sci cam timestamps."; @@ -196,10 +201,12 @@ int main(int argc, char** argv) { } } } + + std::vector sci_cam_timestamps_plus_extra = all_sci_cam_timestamps; for (size_t extra_it = 0; extra_it < extra_sci_cam_timestamps.size(); extra_it++) { - all_sci_cam_timestamps.push_back(extra_sci_cam_timestamps[extra_it]); + sci_cam_timestamps_plus_extra.push_back(extra_sci_cam_timestamps[extra_it]); } - std::sort(all_sci_cam_timestamps.begin(), all_sci_cam_timestamps.end()); + std::sort(sci_cam_timestamps_plus_extra.begin(), sci_cam_timestamps_plus_extra.end()); // Read the nav cam data we will need std::vector const& nav_cam_msgs = nav_cam_handle.bag_msgs; @@ -227,13 +234,15 @@ int main(int argc, char** argv) { // time in the list of nav and sci messages. // Keep the nav image whose time plus FLAGS_bracket_len is <= the given sci cam image. 
int num_nav = all_nav_cam_timestamps.size(); - int num_sci = all_sci_cam_timestamps.size(); + int num_sci_plus_extra = sci_cam_timestamps_plus_extra.size(); + int num_sci = sci_cam_set.size(); int nav_cam_pos = 0; // used to narrow down the lookup - for (int sci_it = 0; sci_it < num_sci; sci_it++) { + int num_bracketed_sci_cams = 0; + for (int sci_it = 0; sci_it < num_sci_plus_extra; sci_it++) { for (int nav_it1 = nav_cam_pos; nav_it1 < num_nav; nav_it1++) { // Adjust for timestamp offsets double t1 = all_nav_cam_timestamps[nav_it1]; - double s = all_sci_cam_timestamps[sci_it] - nav_cam_to_sci_cam_timestamp_offset; + double s = sci_cam_timestamps_plus_extra[sci_it] - nav_cam_to_sci_cam_timestamp_offset; bool is_good1 = (t1 >= s - d/2.0 && t1 <= s); @@ -242,14 +251,27 @@ int main(int argc, char** argv) { nav_cam_pos = nav_it1; // save this for the future // Now get the right bracket - for (size_t nav_it2 = num_nav - 1; nav_it2 >= 0; nav_it2--) { + // Use an int counter, as with unsigned int values subtracting is dangerous + for (int nav_it2 = num_nav - 1; nav_it2 >= 0; nav_it2--) { double t2 = all_nav_cam_timestamps[nav_it2]; bool is_good2 = (s < t2 && t2 <= s + d/2.0); if (is_good2) { nav_cam_timestamps.push_back(all_nav_cam_timestamps[nav_it1]); nav_cam_timestamps.push_back(all_nav_cam_timestamps[nav_it2]); - std::cout << "sci - nav1 and nav2 - sci: " << s - t1 << ' ' << t2 - s << std::endl; + std::cout << std::setprecision(17) << std::fixed << "For "; + if (sci_cam_set.find(sci_cam_timestamps_plus_extra[sci_it]) != sci_cam_set.end()) + std::cout << "sci_cam "; + else + std::cout << "extra "; + std::cout << "timestamp " + << sci_cam_timestamps_plus_extra[sci_it] << std::setprecision(4) + << ", adj_stamp - nav1 and nav2 - adj_stamp: " << s - t1 << ' ' << t2 - s + << std::endl; + + if (sci_cam_set.find(sci_cam_timestamps_plus_extra[sci_it]) != sci_cam_set.end()) + num_bracketed_sci_cams++; + break; // Found what we needed, stop this loop } } @@ -259,10 +281,20 @@ 
int main(int argc, char** argv) { } } + if (num_bracketed_sci_cams == num_sci) + std::cout << "Bracketed all " << num_sci << " sci_cam timestamps." << std::endl; + else + std::cout << "Bracketed only " << num_bracketed_sci_cams << " sci cam timestamps " + << "out of " << num_sci << ", not counting the extra timestamps. " + << "Perhaps a bigger bracket may be desired but that may result in inaccurate " + << "timestamp interpolation later. Ensure existing images are removed before new " + << "ones are added.\n"; + // Ensure the timestamps are sorted std::sort(nav_cam_timestamps.begin(), nav_cam_timestamps.end()); // Write the images to disk + std::cout << "Writing the images to: " << FLAGS_output_nav_cam_dir << std::endl; bool save_grayscale = true; // For feature matching need grayscale for (size_t nav_it = 0; nav_it < nav_cam_timestamps.size(); nav_it++) { double found_time = -1.0; // won't be used, but expected by the api @@ -277,7 +309,6 @@ int main(int argc, char** argv) { snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", FLAGS_output_nav_cam_dir.c_str(), nav_cam_timestamps[nav_it]); - std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, image); } return 0; diff --git a/dense_map/geometry_mapper/tools/old_orthoproject.cc b/dense_map/geometry_mapper/tools/old_orthoproject.cc new file mode 100644 index 00000000..121278b3 --- /dev/null +++ b/dense_map/geometry_mapper/tools/old_orthoproject.cc @@ -0,0 +1,383 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +// This code used to project a camera image onto a plane most aligned +// to the mesh portion seen in the current camera view. It is no +// longer needed, it is broken, but may be of use one day. + +#if 0 + +DEFINE_string(image, "", + "The image to orthoproject. For each image named image.jpg, " + "an input file image_world2cam.txt is expected, and the output " + "orthoimage will be written to image_ortho.png"); + +DEFINE_string(image_list, "", + "Instead of a single image, specify the list of images to orthoproject, " + "with the same convention as above. "); + +DEFINE_double(min_ray_length, 0.0, + "The shortest distance from the camera to the mesh."); + +DEFINE_double(max_ray_length, 10.0, + "The longest distance from the camera to the mesh."); + +DEFINE_double(pixel_size, 0.01, + "The output orthoimage pixel size, in meters."); + +DEFINE_bool(snap_plane, false, + "Adjust the plane to project onto to make " + "angles multiple of 45 degrees with the coordinate axes."); + +DEFINE_string(plane_normal, "", + "Override the auto-computed the normal of the plane to " + "project onto with this. 
Specify as three values in quotes."); + +// Shoot a set of rays from the camera onto the mesh and see +// where they intersect +void sample_mesh(cv::Mat const& image, + camera::CameraModel const& cam, + std::shared_ptr const& bvh_tree, + std::vector & intersections) { + // Wipe the output + intersections.clear(); + + Eigen::Vector3d cam_ctr = -cam.GetTransform().rotation().transpose() * + cam.GetTransform().translation(); + + // Sample the image uniformly + int num_samples = 100; + Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); +#pragma omp parallel for + for (int itx = 0; itx < num_samples; itx++) { + for (int ity = 0; ity < num_samples; ity++) { + double x = (image.cols - 1.0) * static_cast(itx) / (num_samples - 1.0); + double y = (image.rows - 1.0) * static_cast(ity) / (num_samples - 1.0); + + Eigen::Vector2d dist_pix; + dist_pix << x, y; + + Eigen::Vector2d undist_centered_pix; + cam.GetParameters().Convert + (dist_pix, &undist_centered_pix); + + // Ray from camera going through the pixel + Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam.GetParameters().GetFocalVector()[0], + undist_centered_pix.y() / cam.GetParameters().GetFocalVector()[1], + 1.0); + cam_ray.normalize(); + + Eigen::Vector3d world_ray = cam_to_world_rot * cam_ray; + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + bvh_ray.tmin = FLAGS_min_ray_length; + bvh_ray.tmax = FLAGS_max_ray_length; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (!bvh_tree->intersect(bvh_ray, &hit)) + continue; + double cam_to_mesh_dist = hit.t; + + Eigen::Vector3d intersection = cam_ctr + cam_to_mesh_dist * world_ray; + +#pragma omp critical + { + intersections.push_back(intersection); + } + } + } + + if (intersections.empty()) + LOG(FATAL) << "Could not project the image onto the mesh.\n"; +} + +// Create a new 
coordinate system having the desired plane as its x-y +// plane, and the normal to the plane as its z axis. +void setup_plane_coord_system(camera::CameraModel const& cam, + Eigen::Vector3d & plane_normal, + Eigen::Matrix3d & world2planecoord) { + // The camera view (the ray starting at the camera center and going through + // the image center). + Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); + Eigen::Vector3d cam_dir = cam_to_world_rot * Eigen::Vector3d(0, 0, 1); + + // Ensure that the normal to the plane agrees with the camera direction + double dot_prod = cam_dir.dot(plane_normal); + if (dot_prod < 0) + plane_normal *= -1.0; + + // The "up" direction + Eigen::Vector3d z(0, 0, 1); + + Eigen::Vector3d lz = plane_normal.normalized(); + // The new y points "down", as suggested by the z axis, while still + // being in the plane + Eigen::Vector3d ly = -1.0 * (z - z.dot(plane_normal) * plane_normal); + // Treat the special case when the plane is perfectly horizontal, + // then the normal can't point down. Normalize. + if (ly.norm() == 0) + ly = Eigen::Vector3d(0, 1, 0); + ly.normalize(); + // The new x is then the remaining direction so that we have a + // right-handed coordinate system. + Eigen::Vector3d lx = ly.cross(lz); + if (lx.norm() == 0) { + lx = Eigen::Vector3d(1, 0, 0); // should never happen + ly = lz.cross(lx); + } + lx.normalize(); + + // The matrix to transform to the coordinate system having the + // desired plane to project onto as its x-y plane. 
+ Eigen::Matrix3d planecoord2world(3, 3); + planecoord2world.col(0) = lx; + planecoord2world.col(1) = ly; + planecoord2world.col(2) = lz; + + world2planecoord = planecoord2world.transpose(); +} + +void orthoproject(cv::Mat const& image, + Eigen::Matrix3d const& world2planecoord, + Eigen::Vector3d const& plane_normal, + camera::CameraModel const& cam, + std::shared_ptr const& bvh_tree, + std::vector const& intersections, + double pixel_size, + cv::Mat & orthoimage) { + // Find the bounding box of the mesh points seen by the camera + // in the coordinate system of the desired plane + + double min_x = std::numeric_limits::max(), max_x = -min_x; + double min_y = min_x, max_y = max_x; + double min_z = min_x, max_z = max_x; + for (size_t it = 0; it < intersections.size(); it++) { + Eigen::Vector3d local_pt = world2planecoord * intersections[it]; + if (local_pt.x() < min_x) min_x = local_pt.x(); + if (local_pt.x() > max_x) max_x = local_pt.x(); + if (local_pt.y() < min_y) min_y = local_pt.y(); + if (local_pt.y() > max_y) max_y = local_pt.y(); + if (local_pt.z() < min_z) min_z = local_pt.z(); + if (local_pt.z() > max_z) max_z = local_pt.z(); + } + + if (min_x > max_x || min_y > max_y || min_z > max_z) + LOG(FATAL) << "The rays don't intersect the mesh."; + + // The output image dimensions + int num_cols = ceil((max_x - min_x)/pixel_size); + int num_rows = ceil((max_y - min_y)/pixel_size); + + orthoimage = cv::Mat(num_rows, num_cols, CV_8UC1, cv::Scalar(0)); + +#pragma omp parallel for + for (int col = 0; col < num_cols; col++) { + for (int row = 0; row < num_rows; row++) { + // Create a point in the orthoimage plane corresponding to a future pixel + double z = min_z; + double y = min_y + row * pixel_size + pixel_size/2.0; + double x = min_x + col * pixel_size + pixel_size/2.0; + Eigen::Vector3d world_pt = world2planecoord.transpose() * Eigen::Vector3d(x, y, z); + + // Shoot a ray from that point to the mesh + BVHTree::Ray bvh_ray; + bvh_ray.dir = 
dense_map::eigen_to_vec3f(plane_normal); + bvh_ray.origin = dense_map::eigen_to_vec3f(world_pt); + bvh_ray.tmin = 0; + bvh_ray.tmax = max_z - min_z; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (!bvh_tree->intersect(bvh_ray, &hit)) + continue; + + double plane_to_mesh_dist = hit.t; + Eigen::Vector3d intersection = world_pt + plane_to_mesh_dist * plane_normal; + Eigen::Vector3d cam_pt = cam.GetTransform() * intersection; + + // Skip points that project behind the camera + if (cam_pt.z() <= 0) + continue; + + // Get the distorted pixel value + Eigen::Vector2d undist_centered_pix = + cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d dist_pix; + cam.GetParameters().Convert + (undist_centered_pix, &dist_pix); + + // Skip pixels that don't project in the image + if (dist_pix.x() < 0 || dist_pix.x() > image.cols - 1 || + dist_pix.y() < 0 || dist_pix.y() > image.rows - 1) + continue; + + // Find the pixel value + cv::Size s(1, 1); + cv::Mat interp_pix_val; + cv::Point2f P; + P.x = dist_pix[0]; + P.y = dist_pix[1]; + + // Do bilinear interpolation into the image + cv::getRectSubPix(image, s, P, interp_pix_val); + uchar color = interp_pix_val.at(0, 0); + + orthoimage.at(row, col) = color; + } + } +} + +void run_orthoproject(bool snap_plane, + std::string const& image_file, + double pixel_size, + std::string const& plane_normal_str, + std::shared_ptr const& bvh_tree, + camera::CameraModel & cam) { + // Manufacture the world_to_cam_file and output orthoimage_file + std::string world_to_cam_file, orthoimage_file; + + world_to_cam_file = ff_common::ReplaceInStr(image_file, + ".jpg", "_world2cam.txt"); + orthoimage_file = ff_common::ReplaceInStr(image_file, ".jpg", "_ortho.png"); + + if (world_to_cam_file == image_file || orthoimage_file == image_file) + LOG(FATAL) << "Expecting a .jpg image for file: " << image_file; + + Eigen::Affine3d world_to_cam; + if (!dense_map::readAffine(world_to_cam, world_to_cam_file)) + 
LOG(FATAL) << "Could not read a 4x4 matrix from: " << world_to_cam_file << "\n"; + cam.SetTransform(world_to_cam); + + cv::Mat image = cv::imread(image_file, CV_LOAD_IMAGE_GRAYSCALE); + if (image.rows == 0 || image.cols == 0) + LOG(FATAL) << "Could not read image from: " << image_file; + + // Shoot a set of rays from the camera onto the mesh and see + // where they intersect + std::vector intersections; + sample_mesh(image, cam, bvh_tree, intersections); + + Eigen::Vector3d plane_normal; + if (plane_normal_str != "") { + // The user wants to override the plane normal + std::istringstream is(plane_normal_str); + if (!(is >> plane_normal[0] >> plane_normal[1] >> plane_normal[2])) + LOG(FATAL) << "Could not parse three values (the plane normal) from: " << plane_normal_str; + std::cout << "Using user-defined normal to the plane: " + << plane_normal.transpose() << std::endl; + } else { + // Find the best fit plane for the projections of the pixels onto the walls + Eigen::Vector3d centroid; + dense_map::bestFitPlane(intersections, centroid, plane_normal); + + std::cout << "Normal to the plane: " << plane_normal.transpose() << std::endl; + + // Make the plane have angles of 45 degrees with the coordinate axes + if (snap_plane) { + dense_map::snapPlaneNormal(plane_normal); + std::cout << "Snapped normal to the plane: " << plane_normal.transpose() << std::endl; + } + } + + plane_normal.normalize(); + + // Create a new coordinate system having the desired plane as its x-y + // plane, and the normal to the plane as its z axis. 
+ Eigen::Matrix3d world2planecoord; + setup_plane_coord_system(cam, plane_normal, world2planecoord); + + cv::Mat orthoimage; + orthoproject(image, world2planecoord, plane_normal, cam, + bvh_tree, intersections, pixel_size, + orthoimage); + + std::cout << "Writing: " << orthoimage_file << std::endl; + cv::imwrite(orthoimage_file, orthoimage); +} + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + + if (FLAGS_mesh.empty()) + LOG(FATAL) << "The mesh was not specified."; + + // Load the mesh + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; // TODO(oalexan1): Is this necessary? + std::shared_ptr bvh_tree; + dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + + // Load the camera intrinsics + config_reader::ConfigReader config; + config.AddFile("cameras.config"); + if (!config.ReadFiles()) + LOG(FATAL) << "Could not read the configuration file."; + camera::CameraParameters cam_params(&config, "nav_cam"); + camera::CameraModel cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), + cam_params); + + // Run either a single image or a list of images + std::vector image_list; + if (!FLAGS_image.empty()) { + image_list.push_back(FLAGS_image); + } else if (!FLAGS_image_list.empty()) { + std::ifstream ifs(FLAGS_image_list.c_str()); + std::string image_file; + while (ifs >> image_file) + image_list.push_back(image_file); + } else { + LOG(FATAL) << "Must specify either an image to orthoproject or an image list."; + } + + for (size_t it = 0; it < image_list.size(); it++) + run_orthoproject(FLAGS_snap_plane, + image_list[it], + FLAGS_pixel_size, + FLAGS_plane_normal, + bvh_tree, cam); + + return 0; +} +#endif + +int main(int argc, char** argv) { + std::cout << "This code is broken and commented out." 
<< std::endl; +} diff --git a/dense_map/geometry_mapper/tools/orthoproject.cc b/dense_map/geometry_mapper/tools/orthoproject.cc index d622d924..85cf1a8e 100644 --- a/dense_map/geometry_mapper/tools/orthoproject.cc +++ b/dense_map/geometry_mapper/tools/orthoproject.cc @@ -33,355 +33,12 @@ #include -#if 0 - -// Old broken code, project onto a plane. See current working code further down. - -DEFINE_string(image, "", - "The image to orthoproject. For each image named image.jpg, " - "an input file image_world2cam.txt is expected, and the output " - "orthoimage will be written to image_ortho.png"); - -DEFINE_string(image_list, "", - "Instead of a single image, specify the list of images to orthoproject, " - "with the same convention as above. "); - -DEFINE_double(min_ray_length, 0.0, - "The shortest distance from the camera to the mesh."); - -DEFINE_double(max_ray_length, 10.0, - "The longest distance from the camera to the mesh."); - -DEFINE_double(pixel_size, 0.01, - "The output orthoimage pixel size, in meters."); - -DEFINE_bool(snap_plane, false, - "Adjust the plane to project onto to make " - "angles multiple of 45 degrees with the coordinate axes."); - -DEFINE_string(plane_normal, "", - "Override the auto-computed the normal of the plane to " - "project onto with this. 
Specify as three values in quotes."); - -// Shoot a set of rays from the camera onto the mesh and see -// where they intersect -void sample_mesh(cv::Mat const& image, - camera::CameraModel const& cam, - std::shared_ptr const& bvh_tree, - std::vector & intersections) { - // Wipe the output - intersections.clear(); - - Eigen::Vector3d cam_ctr = -cam.GetTransform().rotation().transpose() * - cam.GetTransform().translation(); - - // Sample the image uniformly - int num_samples = 100; - Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); -#pragma omp parallel for - for (int itx = 0; itx < num_samples; itx++) { - for (int ity = 0; ity < num_samples; ity++) { - double x = (image.cols - 1.0) * static_cast(itx) / (num_samples - 1.0); - double y = (image.rows - 1.0) * static_cast(ity) / (num_samples - 1.0); - - Eigen::Vector2d dist_pix; - dist_pix << x, y; - - Eigen::Vector2d undist_centered_pix; - cam.GetParameters().Convert - (dist_pix, &undist_centered_pix); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam.GetParameters().GetFocalVector()[0], - undist_centered_pix.y() / cam.GetParameters().GetFocalVector()[1], - 1.0); - cam_ray.normalize(); - - Eigen::Vector3d world_ray = cam_to_world_rot * cam_ray; - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - bvh_ray.tmin = FLAGS_min_ray_length; - bvh_ray.tmax = FLAGS_max_ray_length; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (!bvh_tree->intersect(bvh_ray, &hit)) - continue; - double cam_to_mesh_dist = hit.t; - - Eigen::Vector3d intersection = cam_ctr + cam_to_mesh_dist * world_ray; - -#pragma omp critical - { - intersections.push_back(intersection); - } - } - } - - if (intersections.empty()) - LOG(FATAL) << "Could not project the image onto the mesh.\n"; -} - -// Create a new 
coordinate system having the desired plane as its x-y -// plane, and the normal to the plane as its z axis. -void setup_plane_coord_system(camera::CameraModel const& cam, - Eigen::Vector3d & plane_normal, - Eigen::Matrix3d & world2planecoord) { - // The camera view (the ray starting at the camera center and going through - // the image center). - Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); - Eigen::Vector3d cam_dir = cam_to_world_rot * Eigen::Vector3d(0, 0, 1); - - // Ensure that the normal to the plane agrees with the camera direction - double dot_prod = cam_dir.dot(plane_normal); - if (dot_prod < 0) - plane_normal *= -1.0; - - // The "up" direction - Eigen::Vector3d z(0, 0, 1); - - Eigen::Vector3d lz = plane_normal.normalized(); - // The new y points "down", as suggested by the z axis, while still - // being in the plane - Eigen::Vector3d ly = -1.0 * (z - z.dot(plane_normal) * plane_normal); - // Treat the special case when the plane is perfectly horizontal, - // then the normal can't point down. Normalize. - if (ly.norm() == 0) - ly = Eigen::Vector3d(0, 1, 0); - ly.normalize(); - // The new x is then the remaining direction so that we have a - // right-handed coordinate system. - Eigen::Vector3d lx = ly.cross(lz); - if (lx.norm() == 0) { - lx = Eigen::Vector3d(1, 0, 0); // should never happen - ly = lz.cross(lx); - } - lx.normalize(); - - // The matrix to transform to the coordinate system having the - // desired plane to project onto as its x-y plane. 
- Eigen::Matrix3d planecoord2world(3, 3); - planecoord2world.col(0) = lx; - planecoord2world.col(1) = ly; - planecoord2world.col(2) = lz; - - world2planecoord = planecoord2world.transpose(); -} - -void orthoproject(cv::Mat const& image, - Eigen::Matrix3d const& world2planecoord, - Eigen::Vector3d const& plane_normal, - camera::CameraModel const& cam, - std::shared_ptr const& bvh_tree, - std::vector const& intersections, - double pixel_size, - cv::Mat & orthoimage) { - // Find the bounding box of the mesh points seen by the camera - // in the coordinate system of the desired plane - - double min_x = std::numeric_limits::max(), max_x = -min_x; - double min_y = min_x, max_y = max_x; - double min_z = min_x, max_z = max_x; - for (size_t it = 0; it < intersections.size(); it++) { - Eigen::Vector3d local_pt = world2planecoord * intersections[it]; - if (local_pt.x() < min_x) min_x = local_pt.x(); - if (local_pt.x() > max_x) max_x = local_pt.x(); - if (local_pt.y() < min_y) min_y = local_pt.y(); - if (local_pt.y() > max_y) max_y = local_pt.y(); - if (local_pt.z() < min_z) min_z = local_pt.z(); - if (local_pt.z() > max_z) max_z = local_pt.z(); - } - - if (min_x > max_x || min_y > max_y || min_z > max_z) - LOG(FATAL) << "The rays don't intersect the mesh."; - - // The output image dimensions - int num_cols = ceil((max_x - min_x)/pixel_size); - int num_rows = ceil((max_y - min_y)/pixel_size); - - orthoimage = cv::Mat(num_rows, num_cols, CV_8UC1, cv::Scalar(0)); - -#pragma omp parallel for - for (int col = 0; col < num_cols; col++) { - for (int row = 0; row < num_rows; row++) { - // Create a point in the orthoimage plane corresponding to a future pixel - double z = min_z; - double y = min_y + row * pixel_size + pixel_size/2.0; - double x = min_x + col * pixel_size + pixel_size/2.0; - Eigen::Vector3d world_pt = world2planecoord.transpose() * Eigen::Vector3d(x, y, z); - - // Shoot a ray from that point to the mesh - BVHTree::Ray bvh_ray; - bvh_ray.dir = 
dense_map::eigen_to_vec3f(plane_normal); - bvh_ray.origin = dense_map::eigen_to_vec3f(world_pt); - bvh_ray.tmin = 0; - bvh_ray.tmax = max_z - min_z; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (!bvh_tree->intersect(bvh_ray, &hit)) - continue; - - double plane_to_mesh_dist = hit.t; - Eigen::Vector3d intersection = world_pt + plane_to_mesh_dist * plane_normal; - Eigen::Vector3d cam_pt = cam.GetTransform() * intersection; - - // Skip points that project behind the camera - if (cam_pt.z() <= 0) - continue; - - // Get the distorted pixel value - Eigen::Vector2d undist_centered_pix = - cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); - Eigen::Vector2d dist_pix; - cam.GetParameters().Convert - (undist_centered_pix, &dist_pix); - - // Skip pixels that don't project in the image - if (dist_pix.x() < 0 || dist_pix.x() > image.cols - 1 || - dist_pix.y() < 0 || dist_pix.y() > image.rows - 1) - continue; - - // Find the pixel value - cv::Size s(1, 1); - cv::Mat interp_pix_val; - cv::Point2f P; - P.x = dist_pix[0]; - P.y = dist_pix[1]; - - // Do bilinear interpolation into the image - cv::getRectSubPix(image, s, P, interp_pix_val); - uchar color = interp_pix_val.at(0, 0); - - orthoimage.at(row, col) = color; - } - } -} - -void run_orthoproject(bool snap_plane, - std::string const& image_file, - double pixel_size, - std::string const& plane_normal_str, - std::shared_ptr const& bvh_tree, - camera::CameraModel & cam) { - // Manufacture the world_to_cam_file and output orthoimage_file - std::string world_to_cam_file, orthoimage_file; - - world_to_cam_file = ff_common::ReplaceInStr(image_file, - ".jpg", "_world2cam.txt"); - orthoimage_file = ff_common::ReplaceInStr(image_file, ".jpg", "_ortho.png"); - - if (world_to_cam_file == image_file || orthoimage_file == image_file) - LOG(FATAL) << "Expecting a .jpg image for file: " << image_file; - - Eigen::Affine3d world_to_cam; - if (!dense_map::readAffine(world_to_cam, world_to_cam_file)) - 
LOG(FATAL) << "Could not read a 4x4 matrix from: " << world_to_cam_file << "\n"; - cam.SetTransform(world_to_cam); - - cv::Mat image = cv::imread(image_file, CV_LOAD_IMAGE_GRAYSCALE); - if (image.rows == 0 || image.cols == 0) - LOG(FATAL) << "Could not read image from: " << image_file; - - // Shoot a set of rays from the camera onto the mesh and see - // where they intersect - std::vector intersections; - sample_mesh(image, cam, bvh_tree, intersections); - - Eigen::Vector3d plane_normal; - if (plane_normal_str != "") { - // The user wants to override the plane normal - std::istringstream is(plane_normal_str); - if (!(is >> plane_normal[0] >> plane_normal[1] >> plane_normal[2])) - LOG(FATAL) << "Could not parse three values (the plane normal) from: " << plane_normal_str; - std::cout << "Using user-defined normal to the plane: " - << plane_normal.transpose() << std::endl; - } else { - // Find the best fit plane for the projections of the pixels onto the walls - Eigen::Vector3d centroid; - dense_map::bestFitPlane(intersections, centroid, plane_normal); - - std::cout << "Normal to the plane: " << plane_normal.transpose() << std::endl; - - // Make the plane have angles of 45 degrees with the coordinate axes - if (snap_plane) { - dense_map::snapPlaneNormal(plane_normal); - std::cout << "Snapped normal to the plane: " << plane_normal.transpose() << std::endl; - } - } - - plane_normal.normalize(); - - // Create a new coordinate system having the desired plane as its x-y - // plane, and the normal to the plane as its z axis. 
- Eigen::Matrix3d world2planecoord; - setup_plane_coord_system(cam, plane_normal, world2planecoord); - - cv::Mat orthoimage; - orthoproject(image, world2planecoord, plane_normal, cam, - bvh_tree, intersections, pixel_size, - orthoimage); - - std::cout << "Writing: " << orthoimage_file << std::endl; - cv::imwrite(orthoimage_file, orthoimage); -} - -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - - if (FLAGS_mesh.empty()) - LOG(FATAL) << "The mesh was not specified."; - - // Load the mesh - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; // TODO(oalexan1): Is this necessary? - std::shared_ptr bvh_tree; - dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - - // Load the camera intrinsics - config_reader::ConfigReader config; - config.AddFile("cameras.config"); - if (!config.ReadFiles()) - LOG(FATAL) << "Could not read the configuration file."; - camera::CameraParameters cam_params(&config, "nav_cam"); - camera::CameraModel cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), - cam_params); - - // Run either a single image or a list of images - std::vector image_list; - if (!FLAGS_image.empty()) { - image_list.push_back(FLAGS_image); - } else if (!FLAGS_image_list.empty()) { - std::ifstream ifs(FLAGS_image_list.c_str()); - std::string image_file; - while (ifs >> image_file) - image_list.push_back(image_file); - } else { - LOG(FATAL) << "Must specify either an image to orthoproject or an image list."; - } - - for (size_t it = 0; it < image_list.size(); it++) - run_orthoproject(FLAGS_snap_plane, - image_list[it], - FLAGS_pixel_size, - FLAGS_plane_normal, - bvh_tree, cam); - - return 0; -} - -#endif - // Project a given image and camera onto a given mesh, and save an .obj file DEFINE_string(mesh, "", "The mesh to project onto."); -DEFINE_string(image, "", "The image to orthoproject. 
It should be distorted, as extracted from a bag."); +DEFINE_string(image, "", "The image to orthoproject. It should be distorted, as extracted " + "from a bag."); DEFINE_string(camera_to_world, "", "The camera to world file, as written by the geometry mapper. For example: " @@ -389,18 +46,27 @@ DEFINE_string(camera_to_world, "", DEFINE_string(camera_name, "", "The the camera name. For example: 'sci_cam'."); -DEFINE_string(output_dir, "", "The output texture directory. The texture name will be /run.obj."); +DEFINE_string(image_list, "", "The list of images to orthoproject, one per line, " + "unless specified individually as above."); + +DEFINE_string(camera_list, "", "The list of cameras to world transforms use, one per line, " + "unless specified individually as above."); + +DEFINE_int32(num_exclude_boundary_pixels, 0, + "Exclude pixels closer to the boundary than this when texturing."); + +DEFINE_string(output_prefix, "", "The output prefix. The texture name will be" + " -.obj."); int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); - if (FLAGS_mesh.empty() || FLAGS_image.empty() || FLAGS_camera_to_world.empty() || FLAGS_camera_name.empty() || - FLAGS_output_dir.empty()) - LOG(FATAL) << "Not all inputs were specified."; + if ((FLAGS_image.empty() || FLAGS_camera_to_world.empty()) && + (FLAGS_image_list.empty() || FLAGS_camera_list.empty())) + LOG(FATAL) << "Must specify either an image and camera, or an image list and camera list."; - if (!boost::filesystem::exists(FLAGS_output_dir)) - if (!boost::filesystem::create_directories(FLAGS_output_dir) || !boost::filesystem::is_directory(FLAGS_output_dir)) - LOG(FATAL) << "Failed to create directory: " << FLAGS_output_dir; + if (FLAGS_mesh.empty() || FLAGS_camera_name.empty() || FLAGS_output_prefix.empty()) + LOG(FATAL) << "Not all inputs were specified."; // Load the mesh mve::TriangleMesh::Ptr mesh; @@ -409,9 +75,6 @@ int main(int argc, char** argv) { std::shared_ptr bvh_tree; 
dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - std::cout << "Reading image: " << FLAGS_image << std::endl; - cv::Mat image = cv::imread(FLAGS_image); - // Load the camera intrinsics config_reader::ConfigReader config; config.AddFile("cameras.config"); @@ -419,47 +82,37 @@ int main(int argc, char** argv) { std::cout << "Using camera: " << FLAGS_camera_name << std::endl; camera::CameraParameters cam_params(&config, FLAGS_camera_name.c_str()); - std::cout << "Reading pose: " << FLAGS_camera_to_world << std::endl; - Eigen::Affine3d cam_to_world; - dense_map::readAffine(cam_to_world, FLAGS_camera_to_world); - std::cout << "Read pose\n" << cam_to_world.matrix() << std::endl; - - std::vector face_vec; - std::map uv_map; - int num_exclude_boundary_pixels = 0; - - std::vector const& faces = mesh->get_faces(); - int num_faces = faces.size(); - std::vector smallest_cost_per_face(num_faces, 1.0e+100); - - camera::CameraModel cam(cam_to_world.inverse(), cam_params); - - // Find the UV coordinates and the faces having them - std::string out_prefix = "run"; - dense_map::projectTexture(mesh, bvh_tree, image, cam, num_exclude_boundary_pixels, smallest_cost_per_face, face_vec, - uv_map); + std::vector images, cameras; + if (!FLAGS_image.empty() && !FLAGS_camera_to_world.empty()) { + images.push_back(FLAGS_image); + cameras.push_back(FLAGS_camera_to_world); + } else { + std::string val; + std::ifstream ifs(FLAGS_image_list); + while (ifs >> val) + images.push_back(val); + ifs.close(); + ifs = std::ifstream(FLAGS_camera_list); + while (ifs >> val) + cameras.push_back(val); + ifs.close(); + } - std::string obj_str; - dense_map::formObjCustomUV(mesh, face_vec, uv_map, out_prefix, obj_str); + if (images.size() != cameras.size()) + LOG(FATAL) << "As many images as cameras must be specified.\n"; - std::string mtl_str; - dense_map::formMtl(out_prefix, mtl_str); + for (size_t it = 0; it < images.size(); it++) { + cv::Mat image = cv::imread(images[it]); - 
std::string obj_file = FLAGS_output_dir + "/" + out_prefix + ".obj"; - std::cout << "Writing: " << obj_file << std::endl; - std::ofstream obj_handle(obj_file); - obj_handle << obj_str; - obj_handle.close(); + Eigen::Affine3d cam_to_world; + dense_map::readAffine(cam_to_world, cameras[it]); - std::string mtl_file = FLAGS_output_dir + "/" + out_prefix + ".mtl"; - std::cout << "Writing: " << mtl_file << std::endl; - std::ofstream mtl_handle(mtl_file); - mtl_handle << mtl_str; - mtl_handle.close(); + std::string prefix = FLAGS_output_prefix + "-" + + boost::filesystem::path(images[it]).stem().string(); - std::string texture_file = FLAGS_output_dir + "/" + out_prefix + ".png"; - std::cout << "Writing: " << texture_file << std::endl; - cv::imwrite(texture_file, image); + dense_map::meshProject(mesh, bvh_tree, image, cam_to_world.inverse(), cam_params, + FLAGS_num_exclude_boundary_pixels, prefix); + } return 0; } diff --git a/dense_map/geometry_mapper/tools/scale_bag.cc b/dense_map/geometry_mapper/tools/scale_bag.cc index 9b3c9bc2..75f9d9e8 100644 --- a/dense_map/geometry_mapper/tools/scale_bag.cc +++ b/dense_map/geometry_mapper/tools/scale_bag.cc @@ -120,7 +120,8 @@ int main(int argc, char** argv) { // Maybe resize cv::Mat resized_image; if (FLAGS_scale < 1.0) { - cv::resize(*image_ptr, resized_image, cv::Size(), FLAGS_scale, FLAGS_scale, cv::INTER_AREA); + cv::resize(*image_ptr, resized_image, cv::Size(), FLAGS_scale, FLAGS_scale, + cv::INTER_AREA); image_ptr = &resized_image; } diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index c20e5ee1..5543c951 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -113,9 +113,9 @@ class StreamingMapper { camera::CameraParameters m_texture_cam_params; sensor_msgs::Image m_texture_obj_msg, m_texture_mtl_msg; - double m_navcam_to_texture_cam_timestamp_offset; - Eigen::MatrixXd 
m_texture_cam_to_navcam_trans; - Eigen::MatrixXd m_navcam_to_body_trans; + double m_nav_cam_to_texture_cam_timestamp_offset; + Eigen::MatrixXd m_texture_cam_to_nav_cam_trans; + Eigen::MatrixXd m_nav_cam_to_body_trans; std::string nav_cam_pose_topic, ekf_state_topic, ekf_pose_topic, texture_cam_topic, sci_cam_exif_topic, m_texture_cam_type, mesh_file; @@ -164,9 +164,9 @@ class StreamingMapper { StreamingMapper::StreamingMapper() : m_texture_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)), - m_navcam_to_texture_cam_timestamp_offset(0.0), - m_texture_cam_to_navcam_trans(Eigen::MatrixXd::Identity(4, 4)), - m_navcam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), + m_nav_cam_to_texture_cam_timestamp_offset(0.0), + m_texture_cam_to_nav_cam_trans(Eigen::MatrixXd::Identity(4, 4)), + m_nav_cam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), m_last_processed_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), m_processed_camera_count(0) {} @@ -196,29 +196,29 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { std::vector nav_to_cam_trans; std::vector nav_to_cam_timestamp_offsets; Eigen::Affine3d hazcam_depth_to_image_transform; - Eigen::Affine3d navcam_to_body_trans; + Eigen::Affine3d nav_cam_to_body_trans; dense_map::readConfigFile( // Inputs cam_names, "nav_cam_transform", // this is the nav cam to body transform "haz_cam_depth_to_image_transform", // Outputs - cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offsets, navcam_to_body_trans, + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offsets, nav_cam_to_body_trans, hazcam_depth_to_image_transform); - m_navcam_to_body_trans = navcam_to_body_trans.matrix(); + m_nav_cam_to_body_trans = nav_cam_to_body_trans.matrix(); { // Note the lock, because m_texture_cam_params is a shared resource const std::lock_guard lock(texture_cam_info_lock); // Index 0 below, based on the order in cam_names - m_texture_cam_to_navcam_trans = nav_to_cam_trans[0].inverse().matrix(); - 
m_navcam_to_texture_cam_timestamp_offset = nav_to_cam_timestamp_offsets[0]; - m_texture_cam_params = cam_params[0]; + m_texture_cam_to_nav_cam_trans = nav_to_cam_trans[0].inverse().matrix(); + m_nav_cam_to_texture_cam_timestamp_offset = nav_to_cam_timestamp_offsets[0]; + m_texture_cam_params = cam_params[0]; - std::cout << "m_navcam_to_texture_cam_timestamp_offset: " - << m_navcam_to_texture_cam_timestamp_offset << "\n"; + std::cout << "nav_cam_to_texture_cam_timestamp_offset: " + << m_nav_cam_to_texture_cam_timestamp_offset << "\n"; std::cout << "texture cam focal vector: " << m_texture_cam_params.GetFocalVector().transpose() << "\n"; } @@ -228,7 +228,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { std::string mapper_img_topic = "/ism/" + m_texture_cam_type + "/img"; std::string mapper_obj_topic = "/ism/" + m_texture_cam_type + "/obj"; std::string mapper_mtl_topic = "/ism/" + m_texture_cam_type + "/mtl"; - ROS_INFO_STREAM("Publishing topics: " << mapper_img_topic << ' ' << mapper_obj_topic << ' ' << mapper_mtl_topic); + ROS_INFO_STREAM("Publishing topics: " << mapper_img_topic << ' ' + << mapper_obj_topic << ' ' << mapper_mtl_topic); texture_img_pub = nh.advertise(mapper_img_topic, 1); texture_obj_pub = nh.advertise(mapper_obj_topic, 1); texture_mtl_pub = nh.advertise(mapper_mtl_topic, 1); @@ -345,24 +346,30 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { ROS_INFO_STREAM("Texture camera type = " << m_texture_cam_type); ROS_INFO_STREAM("Mesh = " << mesh_file); if (!m_sim_mode) { - int num = (!nav_cam_pose_topic.empty()) + (!ekf_state_topic.empty()) + (!ekf_pose_topic.empty()); + int num = (!nav_cam_pose_topic.empty()) + (!ekf_state_topic.empty()) + + (!ekf_pose_topic.empty()); if (num != 1) LOG(FATAL) << "Must specify exactly only one of nav_cam_pose_topic, " << "ekf_state_topic, ekf_pose_topic."; - - ROS_INFO_STREAM("dist_between_processed_cams = " << dist_between_processed_cams); - ROS_INFO_STREAM("max_iso_times_exposure = " << 
max_iso_times_exposure); - ROS_INFO_STREAM("use_single_texture = " << use_single_texture); - ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); - ROS_INFO_STREAM("pixel_size = " << pixel_size); - ROS_INFO_STREAM("num_threads = " << num_threads); - ROS_INFO_STREAM("sim_mode = " << m_sim_mode); - ROS_INFO_STREAM("save_to_disk = " << save_to_disk); - ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); - ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); - ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); } + if (m_sim_mode && m_texture_cam_type == "nav_cam") + LOG(FATAL) << "The streaming mapper does not support nav_cam with simulated data as " + << "its distortion is not modeled.\n"; + + ROS_INFO_STREAM("Streaming mapper parameters:"); + ROS_INFO_STREAM("dist_between_processed_cams = " << dist_between_processed_cams); + ROS_INFO_STREAM("max_iso_times_exposure = " << max_iso_times_exposure); + ROS_INFO_STREAM("use_single_texture = " << use_single_texture); + ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); + ROS_INFO_STREAM("pixel_size = " << pixel_size); + ROS_INFO_STREAM("num_threads = " << num_threads); + ROS_INFO_STREAM("sim_mode = " << m_sim_mode); + ROS_INFO_STREAM("save_to_disk = " << save_to_disk); + ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); + ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); + ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); + // For a camera like sci_cam, the word "sci" better be part of texture_cam_topic. 
std::string cam_name = m_texture_cam_type.substr(0, 3); if (texture_cam_topic.find(cam_name) == std::string::npos) @@ -485,7 +492,7 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, m_texture_mtl_msg.data.resize(mtl_len); std::copy(reinterpret_cast(&mtl_str[0]), // input beg reinterpret_cast(&mtl_str[0]) + mtl_len, // input end - m_texture_mtl_msg.data.begin()); // destination + m_texture_mtl_msg.data.begin()); // destination if (!use_single_texture || processed_camera_count == 0) texture_mtl_pub.publish(m_texture_mtl_msg); @@ -577,13 +584,13 @@ void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3 // Must compensate for the fact that the nav cam, haz cam, and texture cam all // have some time delay among them - double texture_cam_timestamp = nav_cam_timestamp + m_navcam_to_texture_cam_timestamp_offset; + double texture_cam_timestamp = nav_cam_timestamp + m_nav_cam_to_texture_cam_timestamp_offset; // Keep in mind that nav_cam_pose is the transform from the nav cam // to the world. Hence the matrices are multiplied in the order as // below. Eigen::Affine3d texture_cam_pose; - texture_cam_pose.matrix() = nav_cam_pose.matrix() * m_texture_cam_to_navcam_trans; + texture_cam_pose.matrix() = nav_cam_pose.matrix() * m_texture_cam_to_nav_cam_trans; { // Add the latest pose. Use a lock. 
const std::lock_guard lock(texture_cam_pose_lock); @@ -617,10 +624,10 @@ void StreamingMapper::EkfStateCallback(ff_msgs::EkfState::ConstPtr const& ekf_st tf::poseMsgToEigen(ekf_state->pose, body_pose); // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform) - // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as: - // navcam_to_world = body_to_world * navcam_to_body + // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as: + // nav_cam_to_world = body_to_world * nav_cam_to_body Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * m_navcam_to_body_trans; + nav_cam_pose.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); } @@ -633,10 +640,10 @@ void StreamingMapper::EkfPoseCallback(geometry_msgs::PoseStamped::ConstPtr const tf::poseMsgToEigen(ekf_pose->pose, body_pose); // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform) - // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as: - // navcam_to_world = body_to_world * navcam_to_body + // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as: + // nav_cam_to_world = body_to_world * nav_cam_to_body Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * m_navcam_to_body_trans; + nav_cam_pose.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); } diff --git a/isaac/config/dense_map/streaming_mapper.config b/isaac/config/dense_map/streaming_mapper.config index 34421c28..5085b45e 100644 --- a/isaac/config/dense_map/streaming_mapper.config +++ b/isaac/config/dense_map/streaming_mapper.config @@ -44,8 +44,15 @@ nav_cam_pose_topic = ""; -- A backup topic to get the pose from, rarely used 
ekf_pose_topic = ""; -texture_cam_type = "sci_cam"; -texture_cam_topic = "/hw/cam_sci/compressed"; +texture_cam_type = "sci_cam"; + +-- Image topic for the camera to texture +texture_cam_topic = "/hw/cam_sci/compressed"; -- sci_cam +-- texture_cam_topic = "/hw/depth_haz/extended/amplitude_int"; -- haz_cam +-- texture_cam_topic = "/hw/cam_nav"; -- nav_cam +-- texture_cam_topic = "/hw/cam_heat"; -- heat_cam +-- texture_cam_topic = "/hw/cam_acoustics"; -- acoustics_cam + dist_between_processed_cams = 0.10; max_iso_times_exposure = 5.1; sci_cam_exposure_correction = true; @@ -55,7 +62,7 @@ pixel_size = 0.001; num_threads = 48; -- This is measured at full resolution, not the reduced resolution --- used in calibration . +-- used in calibration. num_exclude_boundary_pixels = 0; save_to_disk = false; diff --git a/isaac/readme.md b/isaac/readme.md index dce0d4e9..16809ef7 100644 --- a/isaac/readme.md +++ b/isaac/readme.md @@ -133,12 +133,8 @@ and then the bot can be moved via: rosrun mobility teleop -move -pos "11.0 -5.0 5.0" -tolerance_pos 0.0001 -att "0 0 0 1" -3. Start the bot with the acoustics camera on. +3. ISAAC supports a simulated acoustics camera. See: - source ~/freeflyer_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ - pose:="11.2 -7.72 5.0 0 0 0 0 1" - -See isaac/hardware/acoustics_cam/readme.md for more details. + astrobee/simulation/acoustics_cam/readme.md +for how to use it with the simulator. 
From 4bbcce1850758650881fd182d83c558c1420d25a Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Fri, 7 Jan 2022 11:16:59 -0800 Subject: [PATCH 22/40] Calibration update and fixes for panorama handling (#22) * Rm some repetition and minor wording fix * refiner2: incorporate registration * After much testing, overwrite the previous camera refiner * Doc improvements * Improvements for handling panoramas and some robustness issues Improvements for handling panoramas and some robustness issues --- dense_map/geometry_mapper/CMakeLists.txt | 4 +- .../geometry_mapper/include/dense_map_utils.h | 6 + dense_map/geometry_mapper/readme.md | 596 ++- .../geometry_mapper/src/dense_map_utils.cc | 17 + .../geometry_mapper/tools/camera_refiner.cc | 3894 ++++++++++------- .../geometry_mapper/tools/camera_refiner2.cc | 2713 ------------ .../tools/camera_refiner_old.cc | 2268 ++++++++++ .../geometry_mapper/tools/geometry_mapper.cc | 101 +- .../geometry_mapper/tools/geometry_mapper.py | 21 +- .../geometry_mapper/tools/streaming_mapper.cc | 149 +- .../config/dense_map/streaming_mapper.config | 33 +- 11 files changed, 5072 insertions(+), 4730 deletions(-) delete mode 100644 dense_map/geometry_mapper/tools/camera_refiner2.cc create mode 100644 dense_map/geometry_mapper/tools/camera_refiner_old.cc diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 7ddbc686..9610b9a2 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -175,8 +175,8 @@ add_executable(camera_refiner tools/camera_refiner.cc) target_link_libraries(camera_refiner geometry_mapper_lib gflags glog) -add_executable(camera_refiner2 tools/camera_refiner2.cc) -target_link_libraries(camera_refiner2 +add_executable(camera_refiner_old tools/camera_refiner_old.cc) +target_link_libraries(camera_refiner_old geometry_mapper_lib gflags glog) add_executable(extract_pc_from_bag tools/extract_pc_from_bag.cc) diff --git 
a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 08c83499..fcdc3360 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -56,6 +56,7 @@ void parse_intrinsics_to_float(std::string const& intrinsics_to_float, // A function to split a string like 'haz_cam sci_cam' into // its two constituents and validate against the list of known cameras. void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& ref_cam_name, std::string const& depth_to_image_name, std::string const& extrinsics_to_float, std::set& extrinsics_to_float_set); @@ -124,6 +125,11 @@ Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, bool findInterpPose(double desired_time, std::map const& poses, Eigen::Affine3d& interp_pose); +// Implement some heuristic to find the maximum rotation angle that can result +// from applying the given transform. It is assumed that the transform is not +// too different from the identity. +double maxRotationAngle(Eigen::Affine3d const& T); + // A class to store timestamped poses, implementing O(log(n)) linear // interpolation at a desired timestamp. For fast access, keep the // poses in bins obtained by flooring the timestamp, which is measured diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 769f3f2b..c433f740 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -19,8 +19,8 @@ coming from an Astrobee robot. 
The two main tools are: The following environmental variables should be set up (please adjust them for your particular configuration): - export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ASTROBEE_WS=$HOME/astrobee + export ASTROBEE_SOURCE_PATH=$ASTROBEE_WS/src export ISAAC_WS=$HOME/isaac ## Robot sensors @@ -28,9 +28,9 @@ them for your particular configuration): This software makes use of three sensors that are mounted on the front face of the robot: - - haz cam: A low-resolution depth and intensity camera - - nav cam: A wide field of view medium-resolution navigation camera - - sci cam: A high-resolution narrow field of view "science" camera + - haz cam: A low-resolution depth and intensity camera + - nav cam: A wide field of view medium-resolution navigation camera + - sci cam: A high-resolution narrow field of view "science" camera The nav cam is used to determine robot's position and orientation as it explores its environment. @@ -170,7 +170,7 @@ recording profile. A step-by-step procedure is outlined below if a recording profile has not been set up. First give the bot the ability to acquire intensity data with the -depth camera (haz_cam). For that, connect to the MLP processor of the +depth camera (haz_cam). For that, connect to the MLP processor of the bot. Edit the file: /opt/astrobee/config/cameras.config @@ -234,7 +234,7 @@ earlier in the text. Start the simulator, such as: - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash roslaunch isaac sim.launch rviz:=true \ pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss @@ -270,7 +270,7 @@ If desired to record data for many cameras, these topics must be specified for each of them. For example, for ``heat_cam`` add the lines: - /hw/cam_heat /sim/heat_cam/pose /sim/heat_cam/info + /hw/cam_heat /sim/heat_cam/pose /sim/heat_cam/info to the ``rosbag record`` command. 
@@ -288,7 +288,7 @@ This applies only to real data. If the recorded data is split into many small bags, as it often happens on the ISS, those bags should be first merged as documented in: - $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md + $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md In order to save bandwidth, the sci cam images are published at a reduced resolution (usually 640x480 pixels), while the full-resolution @@ -311,8 +311,8 @@ aperture, and focal length) will be saved in the bag file as well. (Normally the sci cam data will be downloaded from the HLP of ISS robots using some established infrastructure. Alternatively, one can -use `adb pull`. After this tool is used, the data can be manually -deleted from HLP by first connecting to it with `adb shell`.) +use ``adb pull``. After this tool is used, the data can be manually +deleted from HLP by first connecting to it with ``adb shell``.) In order to run camera_calibrator, the sci cam data in the bag needs to be decompressed, resized to 1/4 of the resolution, and made to be @@ -333,7 +333,7 @@ into the bag, one can do the following: --output_bag output.bag --image_type grayscale --scale 0.25 Note that the processed sci cam images will be now on topic -`/hw/cam_sci2`. +``/hw/cam_sci2``. ## Camera calibration @@ -410,7 +410,7 @@ The calibrator program is at: It can be used to calibrate the intrinsics of the nav and haz camera pair, and then of the sci and haz pair. These are referred to as -`cam1` and `cam2` below. +``cam1`` and ``cam2`` below. It is important to remember that the haz cam records both an image intensity and a depth point cloud. @@ -541,20 +541,20 @@ refiner can refine the sci cam intrinsics and will likely do a better job. 
It was found experimentally that the depth to image transform -which is updated by `--update_depth_to_image_transform` +which is updated by ``--update_depth_to_image_transform`` depends very much on how far the calibration target is from the -camera. The value of `hazcam_depth_to_image_transform` already in the +camera. The value of ``hazcam_depth_to_image_transform`` already in the robot config file, which shows roughly a scale transform with a factor of 0.95 is good enough. One has to be also be careful with the option -`--timestamp_offset_sampling`, and even avoid using it in a first +``--timestamp_offset_sampling``, and even avoid using it in a first pass. Notice that here we chose to not update the intrinsics of cam1 (nav_cam). That is because this camera was calibrated with Kalibr a while ago and it is known to be accurate. If desired to calibrate it, one can add the option -`--update_cam1`. +`--update_cam1``. Only if after using the geometry mapper one notices visible registration errors (which manifest themselves as artifacts in the @@ -574,11 +574,11 @@ can go as follows: --update_extrinsics \ --timestamp_offset_sampling '-0.35 -0.15 11' -As before, one better not use the option `--timestamp_offset_sampling` +As before, one better not use the option ``--timestamp_offset_sampling`` unless one is sure it is necessary. Note that this time we optimize the intrinsics of cam1 (sci_cam) -and we do not use `--update_depth_to_image_transform` or optimize +and we do not use ``--update_depth_to_image_transform`` or optimize the intrinsics of cam2 (haz_cam) as this was already done earlier. We do not optimize the distortion of cam1 as that can result in incorrect values if there are not enough measurements at image periphery. @@ -593,11 +593,11 @@ down this document, in the section on camera refinement. 
Nav cam images can be extracted from a bag as follows: - $ASTROBEE_BUILD_PATH/devel/lib/localization_node/extract_image_bag \ - mydata.bag -image_topic /mgt/img_sampler/nav_cam/image_record \ + $ASTROBEE_WS/devel/lib/localization_node/extract_image_bag \ + mydata.bag -image_topic /mgt/img_sampler/nav_cam/image_record \ -output_directory nav_data -use_timestamp_as_image_name -The last option, `-use_timestamp_as_image_name`, must not be missed. +The last option, ``-use_timestamp_as_image_name``, must not be missed. It makes it easy to look up the image acquisition based on image name, and this is used by the geometry mapper. @@ -606,8 +606,8 @@ sampler was not used, the nav cam topic would be /hw/cam_nav. To extract the sci cam data, if necessary, do: - $ASTROBEE_BUILD_PATH/devel/lib/localization_node/extract_image_bag \ - mydata.bag -image_topic /hw/cam_sci/compressed \ + $ASTROBEE_WS/devel/lib/localization_node/extract_image_bag \ + mydata.bag -image_topic /hw/cam_sci/compressed \ -output_directory sci_data -use_timestamp_as_image_name To extract the depth clouds, which may be useful for debugging purposes, @@ -630,6 +630,11 @@ reference documentation in and also in build_map.md in that repository. +If the map to be built is large, consider using the Theia SfM +software. Its usage is described in: + + https://nasa.github.io/astrobee/html/theia_map.html + This SURF map will be used with the geometry mapper. Rebuild it with BRISK features, to be used with the streaming mapper. Examine the BRISK obtained map. If it does not have enough features, rebuild it @@ -637,7 +642,7 @@ with a lower value of -default_brisk_threshold and -max_brisk_threshold (For example, use 70 instead of the default of 90. This may make the sparse map bigger.) -It is suggested to not use the `--histogram_equalization` flag for the +It is suggested to not use the ``--histogram_equalization`` flag for the SURF map, but to use it with the BRISK map. 
Don't forget to set: @@ -666,14 +671,14 @@ configuration file and a topic for it is in the bag file (see more details further down). The geometry mapper can handle both color and grayscale images, and, for sci cam, both full and reduced resolution. -Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, -`ASTROBEE_BUILD_PATH`, and `ISAAC_WS` as earlier. Run: +Ensure that the bot name is correct below. Set ``ASTROBEE_SOURCE_PATH``, +`ASTROBEE_WS``, and ``ISAAC_WS`` as earlier. Run: export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss export ASTROBEE_ROBOT=bsharp2 - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash python $ISAAC_WS/src/dense_map/geometry_mapper/tools/geometry_mapper.py \ --ros_bag data.bag \ @@ -686,6 +691,7 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, --duration 1e+10 \ --sampling_spacing_seconds 5 \ --dist_between_processed_cams 0.1 \ + --angle_between_processed_cams 5.0 \ --depth_exclude_columns 0 \ --depth_exclude_rows 0 \ --foreshortening_delta 5.0 \ @@ -698,7 +704,7 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, --max_iso_times_exposure 5.1 \ --smoothing_time 5e-6 \ --max_num_hole_edges 8000 \ - --max_hole_diameter 1.8 \ + --max_hole_diameter 0.3 \ --num_min_faces_in_component 100 \ --num_components_to_keep 100 \ --edge_keep_ratio 0.2 \ @@ -726,9 +732,12 @@ Parameters: --duration: For how many seconds to do the processing. --sampling_spacing_seconds: How frequently to sample the sci and haz cameras, in seconds. The default is 2. - --dist_between_processed_cams: Once an image or depth image is processed, - how far the camera should move (in meters) before it should process - more data. The default is 0.1 meters. 
+ --dist_between_processed_cams: Once an image or depth cloud is processed, + process a new one whenever either the camera moves by more than this + distance, in meters, or the angle changes by more than + --angle_between_processed_cams, in degrees. The default is 0.1. + --angle_between_processed_cams: See --dist_between_processed_cams. The + default is 5.0. --sci_cam_timestamps: Process only these sci cam timestamps (rather than any in the bag using --dist_between_processed_cams, etc.). Must be a file with one timestamp per line. @@ -761,7 +770,8 @@ Parameters: --voxblox_integrator: When fusing the depth point clouds use this VoxBlox method. Options are: "merged", "simple", and "fast". The default is "merged". - --voxel_size is the grid size used for binning the points, in meters. + --voxel_size: The grid size used for binning depth cloud points and + creating the mesh. Measured in meters. --max_iso_times_exposure: Apply the inverse gamma transform to images, multiply them by max_iso_times_exposure/ISO/exposure_time to adjust for lightning differences, then apply the gamma @@ -793,9 +803,6 @@ Parameters: reliable. In this case one must specify carefully the range of times in the bag to use as it will no longer be constrained by the timestamps in the map. - --sci_cam_timestamps: Process only these sci cam timestamps (rather than - any in the bag using --dist_between_processed_cams, etc.). Must be a - file with one timestamp per line. --start_step: Start processing at this step. Useful for resuming work. Values: 0 (determine poses), 1 (fuse meshes), 2 (smoothe mesh), 3 (fill holes), 4 (clean mesh and rm small connected @@ -831,8 +838,9 @@ with Meshlab: - data_dir/hole_filled_mesh2.ply: A further hole-filled mesh. - data_dir/simplified_mesh.ply: The simplified mesh. - data_dir/smooth_mesh3.ply: A further smoothed version of the mesh. - - data_dir/nav_cam_texture/run.obj: The mesh overlayed with the nav cam texture. 
- data_dir/sci_cam_texture/run.obj: The mesh overlayed with the sci cam texture. + - data_dir/nav_cam_texture/run.obj: The mesh overlayed with the nav cam texture. + - data_dir/haz_cam_texture/run.obj: The mesh overlayed with the haz cam texture. (Several passes of smoothing and hole-filling, as done above, appear necessary from experiments.) @@ -844,12 +852,12 @@ of the pipeline (invocation of the texrecon tool) can be redone. To run this tool it is suggested to pick a portion of the bag where the images face the wall as much as possible, so one may need to -change the `--start` and `--duration` values. +change the ``--start`` and ``--duration`` values. -Unless the flag `--use_brisk_map` is set, the data processing will be +Unless the flag ``--use_brisk_map`` is set, the data processing will be restricted to the range of timestamps contained within the sparse map -(this is another restriction, in addition to `--start` and -`--duration`). +(this is another restriction, in addition to ``--start`` and +`--duration``). If this tool is too slow, or if localization fails, consider adjusting the --localization_options above. For example, to make localization @@ -897,7 +905,7 @@ camera must be present in the bag file at the the specified topic. ## With simulated data The geometry mapper works with any simulated cameras not having -distortion. It was tested to work with simulated images for +distortion. It was tested to work with simulated images for ``sci_cam``, ``haz_cam``, ``heat_cam``, and ``acoustics_cam``. It does not work with ``nav_cam``, which has distortion. @@ -937,6 +945,7 @@ Example of running the geometry mapper with simulated data: --output_dir data_dir \ --sampling_spacing_seconds 2 \ --dist_between_processed_cams 0.1 \ + --angle_between_processed_cams 5.0 \ --verbose It is important to check for the correct names for the camera image @@ -960,7 +969,7 @@ obtained textured model to be visualized. 
To run the streaming mapper with real data for the given bot, do: - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config @@ -1049,7 +1058,7 @@ sci_cam, and analogously for other cameras) with: rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \ -b 10000 -The recording should start before the input bag is played. The `-b` +The recording should start before the input bag is played. The ``-b`` option tells ROS to increase its recording buffer size, as sometimes the streaming mapper can publish giant meshes. @@ -1082,10 +1091,10 @@ map with BRISK features, histogram equalization, and a vocabulary database to find the nav cam poses. The command for building such a BRISK map from a registered SURF map is: - cp surf_map.map brisk_map.map - $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/build_map \ - --output_map brisk_map.map --rebuild --histogram_equalization \ - --vocab_db + cp surf_map.map brisk_map.map + $ASTROBEE_WS/devel/lib/sparse_mapping/build_map \ + --output_map brisk_map.map --rebuild --histogram_equalization \ + --vocab_db See: @@ -1156,9 +1165,12 @@ The ``streaming_mapper.config`` file has following fields: overlay. The default value is /hw/cam_sci/compressed and see note in the text for other cameras. - dist_between_processed_cams: Once an image is textured and - published, how far the camera should move (in meters) before - it should process another texture (any images arriving - in between will be ignored). The default is 0.1 meters. + published, process a new one whenever either the camera moves by + more than this distance, in meters, or the angle changes by more + than angle_between_processed_cams, in degrees. The default is + 0.1. + - angle_between_processed_cams: See: dist_between_processed_cams. + The default is 5.0. 
- max_iso_times_exposure: Apply the inverse gamma transform to images, multiply them by max_iso_times_exposure/ISO/exposure_time to adjust for lightning differences, then apply the gamma @@ -1204,7 +1216,7 @@ textures. To launch the streaming mapper, do: - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config @@ -1248,8 +1260,12 @@ products, and between the nav cam and sci cam textures. Once a dataset of the robot flying around and performing inspections is acquired, so in realistic conditions, rather than with a calibration target, it can be used to further refine the camera -calibration file. This refinement will change the transforms between -the cameras and the intrinsics of the sci cam. +calibration file, including the intrinsics and extrinsics. + +The calibration step above can be avoided altogether, and this robot's +desired transforms to be refined can be initialized with values from a +different robot or with the placeholder values already present in a +given robot's config file. To avoid issues with accuracy of the timestamps of the images, we assume that the robot is paused, or otherwise moves very slowly, @@ -1258,14 +1274,14 @@ following approach should be taken. 
### Image selection -Select a set of nav cam images around 1 second before and after each -sci cam image using the tool: +Select a set of nav cam images shortly before and after each sci cam +image using the image_picker tool: export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss export ASTROBEE_ROBOT=bsharp2 - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash $ISAAC_WS/devel/lib/geometry_mapper/image_picker \ --ros_bag mybag.bag \ @@ -1295,16 +1311,15 @@ used if the given bag is suspected to have a different value of this offset. Such an option the option can be passed also to the camera refiner below and to the geometry mapper. -Examine these images. Since they are used for bracketing, -there will be pairs of very similar images. Yet, it is good -that there should be no excessive overlap otherwise, so the images -in the first pair better have about 3/4 or 4/5 overlap with -images from other pairs. +Examine these images. Since they are used for bracketing, there will +be pairs of very similar images. Yet, it is good that there should be +no excessive overlap otherwise, so the images in the first pair better +have about 4/5 overlap with images from other pairs. 
-If necessary, add more intermediate images by re-running -this tool with +If necessary, add more intermediate images by re-running this tool +with: - --max_time_between_images + --max_time_between_images It is good to not allow too many images or excessive overlap, but, if removing excessive images, ensure that each sci cam image is still @@ -1328,7 +1343,7 @@ above: dir=nav_images images=$(ls $dir/*jpg) surf_map=${dir}_surf.map - $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/build_map \ + $ASTROBEE_WS/devel/lib/sparse_mapping/build_map \ --output_map $surf_map --feature_detection --feature_matching \ --track_building --incremental_ba --bundle_adjustment \ --min_valid_angle 1.0 --num_subsequent_images 20 $images @@ -1339,6 +1354,10 @@ will prevent having too many features which result in small convergence angles between the rays, which may make map-building less stable. +If the map makes a closed loop, and, for example, image 80 becomes +similar to image 0, one should increase --num_subsequent_images to +perhaps 90. This would result in increased runtime but a better map. + Register the map. That can be done, for example, by merging this map with a registered map, bundle-adjusting the obtained map, re-registering it, then extracting the submap corresponding to the @@ -1351,13 +1370,13 @@ map, say showing all the walls as our map, and merge and register our map to this registered submap. That goes as follows. Examine the existing registered map in nvm_visualize and record in a -list named `list.txt` the images which are similar to the ones in the +list named ``list.txt`` the images which are similar to the ones in the map we want to register, one per line (those are printed on the screen as one navigates through the map in the viewer). 
A submap of the registered map can then be extracted, without bundle-adjustment (to not affect the registration) as: - $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/extract_submap \ + $ASTROBEE_WS/devel/lib/sparse_mapping/extract_submap \ --skip_bundle_adjustment --input_map registered_map.map \ --output_map registered_submap.map \ --skip_bundle_adjustment \ @@ -1375,27 +1394,23 @@ registration.) Our map can be merged into this map without modifying the first map, and hence keeping its registration, as: - $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/merge_maps \ - --fix_first_map \ - --num_image_overlaps_at_endpoints 200 \ - --min_valid_angle 1.0 \ - registered_submap.map $surf_map \ + $ASTROBEE_WS/devel/lib/sparse_mapping/merge_maps \ + --fix_first_map \ + --num_image_overlaps_at_endpoints 200 \ + --min_valid_angle 1.0 \ + registered_submap.map $surf_map \ --output_map merged.map The desired now-registered map can then be extracted as: - $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/extract_submap \ - --skip_bundle_adjustment \ - --input_map merged.map --output_map ${dir}_surf_reg.map \ + $ASTROBEE_WS/devel/lib/sparse_mapping/extract_submap \ + --skip_bundle_adjustment \ + --input_map merged.map --output_map ${dir}_surf_reg.map \ ${dir}/*jpg Here, $dir points to nav_images as earlier in the document. -### Running the refiner - -TODO(oalexan1): Rewrite this for refiner2. Mention that the choice of -weights 1000, 0, 0 works well, and an extra tune-up can be achieved -for bsharp2 with 20, 20, 20. +### Running camera_refiner Next, the refiner tool is run, as shown below. This will overwrite the camera calibration file, so it may be prudent to start by copying the @@ -1406,140 +1421,319 @@ point to that. 
export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss export ASTROBEE_ROBOT=bsharp2 - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash + + float="optical_center focal_length distortion" $ISAAC_WS/devel/lib/geometry_mapper/camera_refiner \ - --ros_bag mybag.bag --sparse_map mymap.map \ - --num_iterations 20 --bracket_len 0.6 \ - --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + --ros_bag mybag.bag \ + --sparse_map input_map.map \ + --mesh mesh.ply \ --output_map output_map.map \ - --fix_map --skip_registration --float_scale \ - --timestamp_interpolation --robust_threshold 3 \ - --sci_cam_intrinsics_to_float \ - 'focal_length optical_center distortion' \ - --mesh mesh.ply --mesh_weight 25 \ - --mesh_robust_threshold 3 - -Note how we used the same bracket length as in the image picker. It -is very important to note that a tight bracket should be used, as, if -the robot moves fast and the bracket value is big, it can affect the + --bracket_len 0.6 \ + --depth_tri_weight 1000 \ + --mesh_tri_weight 0 \ + --depth_mesh_weight 0 \ + --sci_cam_intrinsics_to_float "$float" \ + --nav_cam_intrinsics_to_float "$float" \ + --haz_cam_intrinsics_to_float "$float" \ + --affine_depth_to_image \ + --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + --num_overlaps 10 \ + --out_texture_dir out_texture_dir + +Here it was chosen to pass in a mesh from a previous invocation of the +geometry mapper for this robot and the given registered sparse map (for +example, the ``fused.ply`` mesh can be used). That is optional, and it +is needed only one adds to camera_refiner the constraints that the +triangulated points and haz cam clouds stay close to the mesh, when +positive values should be used for ``--mesh_tri_weight`` and +``--depth_mesh_weight``, and if desired to use the option +``--out_texture_dir``. 
+ +Sometimes such a mesh can help with convergence, but should not be +used in a first attempt at calibration. It was not used for the final +bumble robot calibration, when the weights were as above, but was used +for the bsharp2 robot, when all three weights above were set to 25. + +Note that sometimes it is desired to not change the nav_cam +intrinsics, such as for the bumble robot, as existing ISS maps depend +on it, and then one should set `--nav_cam_intrinsics_to_float ""` +above. + +We used the same bracket length as in the image picker. It is very +important to note that a tight bracket should be used, as, if the +robot moves fast and the bracket value is big, it can affect the accuracy of calibration. Yet a tight bracket does not allow for wiggle room if it is decided to vary the timestamp offset (see further down) while staying within the bounds given by the bracket. -This tool will print some statistics showing the reprojection -residuals for all camera combinations. This can be helpful -in figuring out if the value of nav_cam_to_sci_cam_timestamp_offset -is correct. 
If this value is not known well, this tool can be -run with zero or more iterations and various values of +This tool will print some statistics showing the residual errors +before and after each optimization pass (before outlier removal at the +end of the pass), as follows: + +The 25, 50, 75, and 100th percentile residual stats after opt +depth_mesh_x_m: 0.0013406233 0.0032581503 0.008524976 1.1929607 (3551 residuals) +depth_mesh_y_m: 0.0013847081 0.0030440421 0.0069465355 1.8053954 (3551 residuals) +depth_mesh_z_m: 0.00095448611 0.001959229 0.003519088 0.37953008 (3551 residuals) +depth_tri_x_m: 0.0016825461 0.0038125877 0.0081855522 2.3030685 (3573 residuals) +depth_tri_y_m: 0.0018114713 0.0042167015 0.0095410033 2.5599911 (3573 residuals) +depth_tri_z_m: 0.0012253625 0.0026915793 0.0054980693 0.82675259 (3573 residuals) +haz_cam_pix_x: 0.27697292 0.5747702 1.1103665 74339.066 (3716 residuals) +haz_cam_pix_y: 0.11680463 0.28538243 0.63062182 26071.257 (3716 residuals) +mesh_tri_x_m: 0.00054776584 0.0019058208 0.0062554694 4.7597716 (24034 residuals) +mesh_tri_y_m: 0.0004664598 0.0018264008 0.0061599388 3.669907 (24034 residuals) +mesh_tri_z_m: 0.00015713815 0.00064641858 0.0021773666 1.062293 (24034 residuals) +nav_cam_pix_x: 0.080328781 0.21718455 0.48093808 618.45992 (206598 residuals) +nav_cam_pix_y: 0.096269135 0.23056959 0.4939937 476.99626 (206598 residuals) +sci_cam_pix_x: 0.048502913 0.23643751 0.53009189 20.129477 (623 residuals) +sci_cam_pix_y: 0.21452953 0.47693424 1.0187402 25.362118 (623 residuals) + +These can be helpful in figuring out if the calibration result is good. +The errors whose name ends in "_m" are in meters and measure +the absolute differences between the depth clouds and mesh +(depth_mesh), between depth clouds and triangulated points +(depth_tri), and between mesh points and triangulated points +(mesh_tri), in x, y, and z, respectively. 
The ``mesh`` residuals will +be printed only if a mesh is passed on input and if the mesh-related +weights are positive. Some outliers are unavoidable, hence some of +these numbers can be big even if the calibration overall does well +(the robust threshold does not allow outliers to dominate). + +A source of errors (apart from inaccurate intrinsics, extrinsics, or +insufficiently good modeling of the cameras) can be the +nav_cam_to_sci_cam_timestamp_offset, which can be non-zero if the HLP +and MLP/LLP processors are not synchronized (the sci_cam pictures are +acquired with the HLP and nav_cam with MLP/LLP). If this value is not +known well, this tool can be run with zero or more iterations and +various values of --nav_cam_to_sci_cam_offset_override_value -to see which value gives the smallest residuals. The geometry mapper -cam be run with various obtained calibrated files, and see which -causes least amount of registration errors and most agreement between -the nav_cam and sci_cam textures. 
- -A subset of the stats output is as follows: - - The 25, 50 and 75 percentile residual stats before opt - haznavhaz1: 0.15588849206757516 0.28939657329467749 0.50425806803227147 - haznavhaz2: 0.14321982353646234 0.2810887933143924 0.47081968246287786 - nav1: 0.20133152070741289 0.43912265848302923 0.79663785518573604 - nav2: 0.16416862779482244 0.35723227217511067 0.65759965916544161 - navscinav1: 0.97612783076206711 2.3173228923638192 5.1374205463019109 - navscinav2: 0.73081001251695454 2.3535687733679822 5.6151463474188459 - navscisci1: 0.57405681159248445 1.3846928084816739 3.0725112787539501 - navscisci2: 0.42920106999360996 1.4522176781247396 3.4578275058146062 - -For the first two lines with stats, a 3D point was found for the haz -and nav camera pair (taking into account pixel matches, the depth -measurement, and camera orientations), it was projected into the haz -camera, and the column (first line) and row (second line) absolute -difference residuals were computed, sorted, and given percentiles were -found. For the next two lines the same logic was used, without depth -measurements, by simply triangulating a 3D point from given nav cam -matches and projecting it back into the nav camera. Other lines of -statistics are to be interpreted analogously. - -Above, the options --mesh, --mesh_weight, and --mesh_robust_threshold -were used in order to employ a mesh generated from a previous run of -the geometry mapper with the same dataset as a constraint. This was -found to greatly improve the sci cam intrinsics and extrinsics -parameters as well as the agreement between nav_cam and sci_cam -textures. In a first pass this tool can be used without these mesh -options. +to see which value gives the smallest residuals. + +If the ``--out_texture_dir`` option is specified, the tool will create +textured meshes for each image and optimized camera at the +end. Ideally those textured meshes will agree among each other. 
This program's options are: - --ros_bag: A ROS bag with recorded nav_cam, haz_cam, and - full-resolution sci_cam data. - --sparse_map: A registered SURF sparse map made with some of - the ROS bag data, and including nav cam images closely - bracketing the sci cam images. - --nav_cam_topic: The nav cam image topic in the bag file. - --haz_cam_points_topic: The haz cam point cloud topic in the bag file. - --haz_cam_intensity_topic: The haz cam intensity topic in the bag file. - --sci_cam_topic: The sci cam image topic in the bag file. - --start: How many seconds into the bag to start processing the data. - --duration: For how many seconds to do the processing. - --max_haz_cam_image_to_depth_timestamp_diff: Use depth - haz cam clouds that are within this distance in time from the - nearest haz cam intensity image. The default is 0.2. - --robust_threshold: Pixel errors much larger than this will be - exponentially attenuated to affect less the cost - function. The default is 1.0. - --num_cam_iterations: How many solver iterations to perform to - solve for cam1 intrinsics. The default is 20. - --bracket_len: Lookup sci and haz cam images only between - consecutive nav cam images whose distance in time is no more than - this (in seconds). It is assumed the robot moves slowly and - uniformly during this time. - --sci_cam_timestamps: Use only these sci cam timestamps. Must be - a file with one timestamp per line. - --float_scale: If to optimize the scale of the clouds (use it if - the sparse map is kept fixed). - --fix_map: Do not optimize the sparse map, hence only the camera - params. - --sci_cam_intrinsics_to_float: Refine 0 or more of the following - intrinsics for sci_cam: focal_length, optical_center, - distortion. Specify as a quoted list. For example: - 'focal_length optical_center distortion'. - --timestamp_interpolation: If true, interpolate between timestamps. 
- May give better results if the robot is known to move uniformly, and - perhaps less so for stop-and-go robot motion. - --skip_registration: If true, do not re-register the sparse map. - Then the hugin and xyz file options need not be provided. - This may result in the scale not being correct if the sparse map - is not fixed. - --hugin_file: The path to the hugin .pto file used for sparse map + + --ros_bag (string, default = "") + A ROS bag with recorded nav_cam, haz_cam intensity, + full-resolution sci_cam, and haz_cam depth clouds. + + --sparse_map (string, default = "") + A registered SURF sparse map made with some of the ROS bag data, + including nav cam images closely bracketing the sci cam + images. + + --output_map (string, default = "") + Output file containing the updated map. + + --nav_cam_topic (string, default = "/mgt/img_sampler/nav_cam/image_record") + The nav cam topic in the bag file. + + --haz_cam_intensity_topic (string, default = "/hw/depth_haz/extended/amplitude_int") + The depth camera intensity topic in the bag file. + + --sci_cam_topic (string, default = "/hw/cam_sci/compressed") + The sci cam topic in the bag file. + + --haz_cam_points_topic (string, default = "/hw/depth_haz/points") + The depth point cloud topic in the bag file. + + --num_overlaps (int32, default = 10) + How many images (of all camera types) close and forward in time + to match to given image. + + --max_haz_cam_image_to_depth_timestamp_diff (double, default = 0.2) + Use a haz cam depth cloud only if it is within this distance in + time from a given haz cam intensity image. + + --robust_threshold (double, default = 3.0) + Residual pixel errors and 3D point residuals (the latter multiplied + by corresponding weight) much larger than this will be + exponentially attenuated to affect less the cost function. + + --num_iterations (int32, default = 20) + How many solver iterations to perform in calibration. 
+ + --bracket_len (double, default = 0.6) + Lookup sci and haz cam images only between consecutive nav cam + images whose distance in time is no more than this (in seconds), + after adjusting for the timestamp offset between these + cameras. It is assumed the robot moves slowly and uniformly + during this time. A large value here will make the refiner + compute a poor solution but a small value will prevent enough + sci_cam images being bracketed. + + --nav_cam_intrinsics_to_float (string, default = "") + Refine given nav_cam intrinsics. Specify as a quoted list. For + example: 'focal_length optical_center distortion'. + + --haz_cam_intrinsics_to_float (string, default = "") + Refine given haz_cam intrinsics. Specify as a quoted list. For + example: 'focal_length optical_center distortion'. + + --sci_cam_intrinsics_to_float (string, default = "") + Refine given sci_cam intrinsics. Specify as a quoted list. For + example: 'focal_length optical_center distortion'. + + --extrinsics_to_float (string, default = "haz_cam sci_cam depth_to_image") + Specify the cameras whose extrinsics to float, relative to + nav_cam. Also consider if to float the haz_cam depth_to_image + transform. + + --float_scale (bool, false unless specified) + If to optimize the scale of the clouds, part of + haz_cam_depth_to_image_transform (use it if the sparse map is + kept fixed, or else rescaling and registration of the map and + extrinsics is needed). This parameter should not be used with + --affine_depth_to_image when the transform is affine, rather + than rigid and a scale. See also --extrinsics_to_float. + + --float_sparse_map (bool, false unless specified) + Optimize the sparse map. This should be avoided as it can + invalidate the scales of the extrinsics and the registration. It + should at least be used with big mesh weights to attenuate those + effects. See also: --registration. 
+ + --float_timestamp_offsets (bool, false unless specified) + If to optimize the timestamp offsets among the cameras. + + --nav_cam_num_exclude_boundary_pixels (int32, default = 0) + Flag as outliers nav cam pixels closer than this to the + boundary, and ignore that boundary region when texturing with + the --out_texture_dir option. This may improve the calibration + accuracy, especially if switching from fisheye to radtan + distortion for nav_cam. See also the geometry_mapper + --undistorted_crop_wins option. + + --timestamp_offsets_max_change (double, default = 1.0) + If floating the timestamp offsets, do not let them change by + more than this (measured in seconds). Existing image bracketing + acts as an additional constraint. + + --nav_cam_to_sci_cam_offset_override_value (double, default = NaN) + Override the value of nav_cam_to_sci_cam_timestamp_offset from + the robot config file with this value. + + --depth_tri_weight (double, default = 1000.0) + The weight to give to the constraint that depth measurements + agree with triangulated points. Use a bigger number as depth + errors are usually on the order of 0.01 meters while + reprojection errors are on the order of 1 pixel. + + --mesh (string, default = "") + Use this geometry mapper mesh from a previous geometry mapper + run to help constrain the calibration. E.g., use fused_mesh.ply. + + --mesh_tri_weight (double, default = 0.0) + A larger value will give more weight to the constraint that + triangulated points stay close to a preexisting mesh. Not + suggested by default. + + --depth_mesh_weight (double, default = 0.0) + A larger value will give more weight to the constraint that the + depth clouds stay close to the mesh. Not suggested by default. + + --affine_depth_to_image (bool, false unless specified) + Assume that the depth_to_image_transform for each depth + image + camera is an arbitrary affine transform rather than a rotation + times a scale. 
+
+
+  --refiner_num_passes (int32, default = 2)
+    How many passes of optimization to do. Outliers will be removed
+    after every pass. Each pass will start with the previously
+    optimized solution as an initial guess. Mesh intersections (if
+    applicable) and ray triangulation will be recomputed before each
+    pass.
+
+  --initial_max_reprojection_error (double, default = 300.0)
+    If filtering outliers, remove interest points for which the
+    reprojection error, in pixels, is larger than this. This
+    filtering happens when matches are created, before cameras are
+    optimized, and a big value should be used if the initial cameras
+    are not trusted.
+
+  --max_reprojection_error (double, default = 25.0)
+    If filtering outliers, remove interest points for which the
+    reprojection error, in pixels, is larger than this. This
+    filtering happens after each optimization pass finishes, unless
+    disabled. It is better to not filter too aggressively unless
+    confident in the solution.
+
+  --refiner_min_angle (double, default = 0.5)
+    If filtering outliers, remove triangulated points for which all
+    rays converging to it make an angle (in degrees) less than this.
+    Note that some cameras in the rig may be very close to each
+    other relative to the triangulated points, so care is needed
+    here.
+
+  --refiner_skip_filtering (bool, false unless specified)
+    Do not do any outlier filtering.
+
+  --out_texture_dir (string, default = "")
+    If non-empty and if an input mesh was provided, project the
+    camera images using the optimized poses onto the mesh and write
+    the obtained .obj files in the given directory.
+
+  --min_ray_dist (double, default = 0.0)
+    The minimum search distance from a starting point along a ray
+    when intersecting the ray with a mesh, in meters (if
+    applicable).
+
+  --max_ray_dist (double, default = 100.0)
+    The maximum search distance from a starting point along a ray
+    when intersecting the ray with a mesh, in meters (if
+    applicable). 
+ + --nav_cam_distortion_replacement (string, default = "") + Replace nav_cam's distortion coefficients with this list after + the initial determination of triangulated points, and then + continue with distortion optimization. A quoted list of four or + five values expected, separated by spaces, as the replacement + distortion model will have radial and tangential + coefficients. Set a positive + --nav_cam_num_exclude_boundary_pixels. + + --registration (bool, false unless specified) + If true, and registration control points for the sparse map + exist and are specified by --hugin_file and --xyz_file, + re-register the sparse map at the end. All the extrinsics, + including depth_to_image_transform, will be scaled accordingly. + This is not needed if the nav_cam intrinsics and the sparse map + do not change. + + --hugin_file (string, default = "") + The path to the hugin .pto file used for sparse map registration. - --xyz_file: The path to the xyz file used for sparse map registration. - --output_map: The map optimized by this tool (it won't change if the - option --fix_map is used). - --num_opt_threads: How many threads to use in the optimization. The - default is 16. - --nav_cam_to_sci_cam_offset_override_value: - Override the value of nav_cam_to_sci_cam_timestamp_offset from the - config file with this value. - --mesh: Refine the sci cam so that the sci cam texture agrees with - the nav cam texture when projected on this mesh. - --mesh_weight: A larger value will give more weight to the mesh - constraint. The mesh residuals printed at the end can be used to - examine the effect of this weight. Default: 25.0. - --mesh_robust_threshold: A larger value will try harder to - minimize large divergences from the mesh (but note that some of - those may be outliers). Default: 3.0. - --verbose: Print the residuals and save the images and match files. - Stereo Pipeline's viewer can be used for visualizing these. 
+ + --xyz_file (string, default = "") + The path to the xyz file used for sparse map registration. + + --parameter_tolerance (double, default = 1e-12) + Stop when the optimization variables change by less than this. + + --num_opt_threads (int32, default = 16) + How many threads to use in the optimization. + + --sci_cam_timestamps (string, default = "") + Use only these sci cam timestamps. Must be a file with one + timestamp per line. + + --verbose (bool, false unless specified) + Print the residuals and save the images and match files. Stereo + Pipeline's viewer can be used for visualizing these. ### Using the refiner with a radtan model for nav_cam The camera refiner supports using a radtan distortion model for -nav_cam, that is a model with radial and and tangenetial distortion, +nav_cam, that is a model with radial and and tangential distortion, just like for haz_cam and sci_cam, but the default nav_cam distortion model is fisheye. One can edit the robot config file and replace the fisheye model with a desired radial + tangential distortion model (4 -or 5 coeffecients are needed) then run the refiner. +or 5 coefficients are needed) then run the refiner. Since it is not easy to find a good initial set of such coefficients, the refiner has the option of computing such a model which best fits @@ -1558,7 +1752,7 @@ fisheye model. Since it is expected that fitting such a model is harder at the periphery, where the distortion is stronger, the camera refiner has -the option --nav_cam_num_exclude_boundary_pixels can be used to +the option ``--nav_cam_num_exclude_boundary_pixels`` can be used to restrict the nav cam view to a central region of given dimensions when such such optimization takes place (whether the new model type is fit on the fly or read from disk when already determined). 
If a @@ -1566,14 +1760,14 @@ satisfactory solution is found and it is desired to later use the geometry mapper with such a model, note its option ``--undistorted_crop_wins``, and one should keep in mind that that the restricted region specified earlier may not exactly be the region to -be used with the geomety mapper, since the former is specified in +be used with the geometry mapper, since the former is specified in distorted pixels and this one in undistorted pixels. All this logic was tested and was shown to work in a satisfactory way, -but no thorough attept was made at validating that a radtan distortion +but no thorough attempt was made at validating that a radtan distortion model, while having more degrees of freedom, would out-perform the fisheye model. That is rather unlikely, since given sufficiently many -images with good overlap, the effect of the peripherial region where +images with good overlap, the effect of the peripheral region where the fisheye lens distortion may not perform perfectly may be small. ## Orthoprojecting images @@ -1588,9 +1782,9 @@ A geometry mapper run directory has all the inputs this tool needs. 
It can be run as follows: export ASTROBEE_SOURCE_PATH=$HOME/projects/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee + export ASTROBEE_WS=$HOME/projects/astrobee export ISAAC_WS=$HOME/projects/isaac - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index 203c849e..5b89e2fe 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -58,6 +58,7 @@ void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set< // A function to split a string like 'haz_cam sci_cam depth_to_image' into // its two constituents and validate against the list of known cameras. void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& ref_cam_name, std::string const& depth_to_image_name, std::string const& extrinsics_to_float, std::set& extrinsics_to_float_set) { @@ -81,6 +82,9 @@ void parse_extrinsics_to_float(std::vector const& cam_names, LOG(FATAL) << "Unknown camera: " << val << "\n"; } + if (extrinsics_to_float_set.find(ref_cam_name) != extrinsics_to_float_set.end()) + LOG(FATAL) << "There exists no extrinsics transform from " << ref_cam_name << " to itself.\n"; + return; } @@ -364,6 +368,19 @@ bool findInterpPose(double desired_time, std::map const return true; } +// Implement some heuristic to find the maximum rotation angle that can result +// from applying the given transform. It is assumed that the transform is not +// too different from the identity. 
+double maxRotationAngle(Eigen::Affine3d const& T) { + Eigen::Vector3d angles = T.linear().eulerAngles(0, 1, 2); + + // Angles close to +/-pi can result even if the matrix is close to identity + for (size_t it = 0; it < 3; it++) + angles[it] = std::min(std::abs(angles[it]), std::abs(M_PI - std::abs(angles[it]))); + double angle_norm = (180.0 / M_PI) * angles.norm(); + return angle_norm; +} + void StampedPoseStorage::addPose(Eigen::Affine3d const& pose, double timestamp) { int bin_index = floor(timestamp); m_poses[bin_index][timestamp] = pose; diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 207afb2c..ece8e7f0 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -17,9 +17,19 @@ * under the License. */ -// TODO(oalexan1): Consider adding a haz cam to haz cam -// reprojection error in the camera refiner. There will be more -// haz to haz matches than haz to nav or haz to sci. +// TODO(oalexan1): Move these OpenMVG headers to dense_map_utils.cc, +// and do forward declarations in dense_map_utils.h. 
Get rid of +// warnings beyond our control +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic push +#include +#include +#include +#include +#include +#pragma GCC diagnostic pop + #include #include #include @@ -71,95 +81,172 @@ #include #include -DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and full-resolution sci_cam data."); +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam intensity, " + "full-resolution sci_cam, and haz_cam depth clouds."); DEFINE_string(sparse_map, "", "A registered SURF sparse map made with some of the ROS bag data, " - "and including nav cam images closely bracketing the sci cam images."); + "including nav cam images closely bracketing the sci cam images."); DEFINE_string(output_map, "", "Output file containing the updated map."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(nav_cam_topic, "/mgt/img_sampler/nav_cam/image_record", + "The nav cam topic in the bag file."); -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", "The depth camera intensity topic in the bag file."); DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); -DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); - -DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); +DEFINE_int32(num_overlaps, 10, "How many images (of all camera types) close and forward in " + "time to match to given image."); DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, - "Use depth haz cam clouds that are within this distance in " - "time from the nearest haz cam 
intensity image."); + "Use a haz cam depth cloud only if it is within this distance in time " + "from the nearest haz cam intensity image."); DEFINE_double(robust_threshold, 3.0, - "Pixel errors much larger than this will be exponentially attenuated " - "to affect less the cost function."); + "Residual pixel errors and 3D point residuals (the latter multiplied " + "by corresponding weight) much larger than this will be " + "exponentially attenuated to affect less the cost function.\n"); DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); -DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by less than this."); - -DEFINE_double(bracket_len, 2.0, +DEFINE_double(bracket_len, 0.6, "Lookup sci and haz cam images only between consecutive nav cam images " "whose distance in time is no more than this (in seconds), after adjusting " "for the timestamp offset between these cameras. It is assumed the robot " - "moves slowly and uniformly during this time."); + "moves slowly and uniformly during this time. A large value here will " + "make the refiner compute a poor solution but a small value will prevent " + "enough sci_cam images being bracketed."); -DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); +DEFINE_string(nav_cam_intrinsics_to_float, "", + "Refine given nav_cam intrinsics. Specify as a quoted list. " + "For example: 'focal_length optical_center distortion'."); -DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration."); - -DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration."); +DEFINE_string(haz_cam_intrinsics_to_float, "", + "Refine given haz_cam intrinsics. Specify as a quoted list. " + "For example: 'focal_length optical_center distortion'."); -DEFINE_bool(timestamp_interpolation, false, - "If true, interpolate between " - "timestamps. 
May give better results if the robot is known to move " - "uniformly, and perhaps less so for stop-and-go robot motion."); - -DEFINE_string(sci_cam_timestamps, "", - "Use only these sci cam timestamps. Must be " - "a file with one timestamp per line."); - -DEFINE_bool(skip_registration, false, - "If true, do not re-register the optimized map. " - "Then the hugin and xyz file options need not be provided. " - "This may result in the scale not being correct if the sparse map is not fixed."); - -DEFINE_bool(opt_map_only, false, "If to optimize only the map and not the camera params."); +DEFINE_string(sci_cam_intrinsics_to_float, "", + "Refine given sci_cam intrinsics. Specify as a quoted list. " + "For example: 'focal_length optical_center distortion'."); -DEFINE_bool(fix_map, false, "Do not optimize the sparse map, hence only the camera params."); +DEFINE_string(extrinsics_to_float, "haz_cam sci_cam depth_to_image", + "Specify the cameras whose extrinsics, relative to nav_cam, to optimize. Also " + "consider if to float the haz_cam depth_to_image transform."); DEFINE_bool(float_scale, false, - "If to optimize the scale of the clouds (use it if the " - "sparse map is kept fixed)."); - -DEFINE_string(sci_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); + "If to optimize the scale of the clouds, part of haz_cam_depth_to_image_transform " + "(use it if the sparse map is kept fixed, or else rescaling and registration " + "of the map and extrinsics is needed). This parameter should not be used with " + "--affine_depth_to_image when the transform is affine, rather than rigid and a scale." + "See also --extrinsics_to_float."); + +DEFINE_bool(float_sparse_map, false, + "Optimize the sparse map. This should be avoided as it can invalidate the scales " + "of the extrinsics and the registration. 
It should at least be used with big mesh " + "weights to attenuate those effects. See also: --registration."); + +DEFINE_bool(float_timestamp_offsets, false, + "If to optimize the timestamp offsets among the cameras."); + +DEFINE_int32(nav_cam_num_exclude_boundary_pixels, 0, + "Flag as outliers nav cam pixels closer than this to the boundary, and ignore " + "that boundary region when texturing with the --out_texture_dir option. " + "This may improve the calibration accuracy, especially if switching from fisheye " + "to radtan distortion for nav_cam. See also the geometry_mapper " + "--undistorted_crop_wins option."); + +DEFINE_double(timestamp_offsets_max_change, 1.0, + "If floating the timestamp offsets, do not let them change by more than this " + "(measured in seconds). Existing image bracketing acts as an additional constraint."); DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); +DEFINE_double(depth_tri_weight, 1000.0, + "The weight to give to the constraint that depth measurements agree with " + "triangulated points. Use a bigger number as depth errors are usually on the " + "order of 0.01 meters while reprojection errors are on the order of 1 pixel."); + DEFINE_string(mesh, "", - "Refine the sci cam so that the sci cam texture agrees with the nav cam " - "texture when projected on this mesh."); + "Use this geometry mapper mesh from a previous geometry mapper run to help constrain " + "the calibration (e.g., use fused_mesh.ply)."); + +DEFINE_double(mesh_tri_weight, 0.0, + "A larger value will give more weight to the constraint that triangulated points " + "stay close to a preexisting mesh. Not suggested by default."); + +DEFINE_double(depth_mesh_weight, 0.0, + "A larger value will give more weight to the constraint that the depth clouds " + "stay close to the mesh. 
Not suggested by default."); + +DEFINE_bool(affine_depth_to_image, false, "Assume that the depth_to_image_transform " + "for each depth + image camera is an arbitrary affine transform rather than a " + "rotation times a scale."); + +DEFINE_int32(refiner_num_passes, 2, "How many passes of optimization to do. Outliers will be " + "removed after every pass. Each pass will start with the previously optimized " + "solution as an initial guess. Mesh intersections (if applicable) and ray " + "triangulation will be recomputed before each pass."); + +DEFINE_double(initial_max_reprojection_error, 300.0, "If filtering outliers, remove interest " + "points for which the reprojection error, in pixels, is larger than this. This " + "filtering happens when matches are created, before cameras are optimized, and " + "a big value should be used if the initial cameras are not trusted."); + +DEFINE_double(max_reprojection_error, 25.0, "If filtering outliers, remove interest points for " + "which the reprojection error, in pixels, is larger than this. This filtering " + "happens after each optimization pass finishes, unless disabled. It is better to not " + "filter too aggressively unless confident of the solution."); + +DEFINE_double(refiner_min_angle, 0.5, "If filtering outliers, remove triangulated points " + "for which all rays converging to it make an angle (in degrees) less than this." 
+ "Note that some cameras in the rig may be very close to each other relative to " + "the triangulated points, so care is needed here."); + +DEFINE_bool(refiner_skip_filtering, false, "Do not do any outlier filtering."); + +DEFINE_string(out_texture_dir, "", "If non-empty and if an input mesh was provided, " + "project the camera images using the optimized poses onto the mesh " + "and write the obtained .obj files in the given directory."); + +DEFINE_double(min_ray_dist, 0.0, "The minimum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); + +DEFINE_double(max_ray_dist, 100.0, "The maximum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); + +DEFINE_string(nav_cam_distortion_replacement, "", + "Replace nav_cam's distortion coefficients with this list after the initial " + "determination of triangulated points, and then continue with distortion " + "optimization. A quoted list of four or five values expected, separated by " + "spaces, as the replacement distortion model will have radial and tangential " + "coefficients. Set a positive --nav_cam_num_exclude_boundary_pixels."); + +DEFINE_bool(registration, false, + "If true, and registration control points for the sparse map exist and are specified " + "by --hugin_file and --xyz_file, re-register the sparse map at the end. All the " + "extrinsics, including depth_to_image_transform, will be scaled accordingly." + "This is not needed if the nav_cam intrinsics and the sparse map do not change."); -DEFINE_double(mesh_weight, 25.0, - "A larger value will give more weight to the mesh constraint. 
" - "The mesh residuals printed at the end can be used to examine " - "the effect of this weight."); +DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration."); -DEFINE_double(mesh_robust_threshold, 3.0, - "A larger value will try harder to minimize large divergences from " - "the mesh (but note that some of those may be outliers)."); +DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " + "less than this."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); + +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. Must be a file with one timestamp per line."); DEFINE_bool(verbose, false, "Print the residuals and save the images and match files." @@ -167,8 +254,44 @@ DEFINE_bool(verbose, false, namespace dense_map { -// TODO(oalexan1): Store separately matches which end up being -// squashed in a pid_cid_to_fid. +// Calculate interpolated world to camera trans +Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, + const double* end_world_to_ref_t, + const double* ref_to_cam_trans, + double beg_ref_stamp, + double end_ref_stamp, + double ref_to_cam_offset, + double cam_stamp) { + Eigen::Affine3d beg_world_to_ref_aff; + array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); + + Eigen::Affine3d end_world_to_ref_aff; + array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); + + Eigen::Affine3d ref_to_cam_aff; + array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); + + // Covert from cam time to ref time and normalize. It is very + // important that below we subtract the big numbers from each + // other first, which are the timestamps, then subtract whatever + // else is necessary. Otherwise we get problems with numerical + // precision with CERES. 
+ double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) + / (end_ref_stamp - beg_ref_stamp); + + if (beg_ref_stamp == end_ref_stamp) + alpha = 0.0; // handle division by zero + + if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; + + // Interpolate at desired time + Eigen::Affine3d interp_world_to_ref_aff + = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); + + Eigen::Affine3d interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff; + + return interp_world_to_cam_aff; +} ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { // Convert to lower-case @@ -189,462 +312,462 @@ ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { return loss_function; } -// An error function minimizing the error of projection of haz cam -// depth points to the haz cam image. -struct DepthToHazError { +// An error function minimizing the error of projecting +// an xyz point into a camera that is bracketed by +// two reference cameras. The precise timestamp offset +// between them is also floated. +struct BracketedCamError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - DepthToHazError(Eigen::Vector2d const& haz_pix, Eigen::Vector3d const& depth_xyz, std::vector const& block_sizes, - camera::CameraParameters const& haz_cam_params) - : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes), m_haz_cam_params(haz_cam_params) { - // Sanity check. 
- if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n"; + BracketedCamError(Eigen::Vector2d const& meas_dist_pix, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || + m_block_sizes[7] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; } + + // Set the correct distortion size. This cannot be done in the interface for now. + m_block_sizes[7] = m_cam_params.GetDistortion().size(); } // Call to work with ceres::DynamicNumericDiffCostFunction. - // Takes array of arrays as parameters. 
bool operator()(double const* const* parameters, double* residuals) const { - // Populate the intrinsics + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[4][0], // ref_to_cam_offset + m_cam_stamp); - // The current transform from the depth point cloud to the image - Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[0]); + // World point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // Apply the scale - double depth_to_image_scale = parameters[1][0]; - depth_to_image.linear() *= depth_to_image_scale; + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); + Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); + Eigen::VectorXd distortion(m_block_sizes[7]); + for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = depth_to_image * m_depth_xyz; + // Convert world point to given cam coordinates + X = world_to_cam_trans * X; - // Project into the camera - Eigen::Vector2d undist_pix = m_haz_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_haz_cam_params.Convert(undist_pix, &dist_pix); + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); // Compute the residuals - residuals[0] = undist_pix[0] - m_haz_pix[0]; - residuals[1] = undist_pix[1] - m_haz_pix[1]; + 
residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; return true; } // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, - std::vector const& block_sizes, - camera::CameraParameters const& haz_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToHazError(nav_pix, depth_xyz, block_sizes, haz_cam_params)); + static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes, cam_params)); - // The residual size is always the same. cost_function->SetNumResiduals(NUM_PIX_PARAMS); - // The camera wrapper knows all of the block sizes to add. 
- for (size_t i = 0; i < block_sizes.size(); i++) { + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 cost_function->AddParameterBlock(block_sizes[i]); - } + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + return cost_function; } private: - Eigen::Vector2d m_haz_pix; // The pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position + Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_haz_cam_params; -}; // End class DepthToHazError + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class BracketedCamError -// An error function minimizing the error of projection of a point in the nav cam -// image. Both the point and the camera pose are variables of optimization. -struct NavError { +// An error function minimizing the error of projecting an xyz point +// into a reference camera. Bracketing, timestamps, and transform to +// ref cam are not needed. 
+struct RefCamError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - NavError(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) - : m_nav_pix(nav_pix), m_block_sizes(block_sizes), m_nav_cam_params(nav_cam_params) { + RefCamError(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { // Sanity check - if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_XYZ_PARAMS) { - LOG(FATAL) << "NavError: The block sizes were not set up properly.\n"; + if (m_block_sizes.size() != 5 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_XYZ_PARAMS || + m_block_sizes[2] != m_num_focal_lengths || + m_block_sizes[3] != NUM_OPT_CTR_PARAMS || + m_block_sizes[4] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; } + + // Set the correct distortion size. This cannot be done in the interface for now. + m_block_sizes[4] = m_cam_params.GetDistortion().size(); } // Call to work with ceres::DynamicNumericDiffCostFunction. - // Takes array of arrays as parameters. 
bool operator()(double const* const* parameters, double* residuals) const { - // Populate the intrinsics - - Eigen::Affine3d world_to_nav_cam; - array_to_rigid_transform(world_to_nav_cam, parameters[0]); + Eigen::Affine3d world_to_ref_t; + array_to_rigid_transform(world_to_ref_t, parameters[0]); - Eigen::Vector3d xyz; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) xyz[it] = parameters[1][it]; + // World point + Eigen::Vector3d X; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; - // Convert to camera coordinates - Eigen::Vector3d X = world_to_nav_cam * xyz; + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); + Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); + Eigen::VectorXd distortion(m_block_sizes[4]); + for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); + + // Convert world point to given cam coordinates + X = world_to_ref_t * X; - // Project into the camera - Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_nav_cam_params.Convert(undist_pix, &dist_pix); + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); // Compute the residuals - residuals[0] = undist_pix[0] - m_nav_pix[0]; - residuals[1] = undist_pix[1] - m_nav_pix[1]; + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; return true; } // Factory to hide the construction of the CostFunction object from the client code. 
- static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction(new NavError(nav_pix, block_sizes, nav_cam_params)); + static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefCamError(meas_dist_pix, block_sizes, cam_params)); // The residual size is always the same. cost_function->SetNumResiduals(NUM_PIX_PARAMS); - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 cost_function->AddParameterBlock(block_sizes[i]); - } + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + return cost_function; } private: - Eigen::Vector2d m_nav_pix; // The pixel observation + Eigen::Vector2d m_meas_dist_pix; std::vector m_block_sizes; - camera::CameraParameters m_nav_cam_params; -}; // End class NavError - -// An error function minimizing the error of transforming depth points -// first to haz cam image coordinates, then to nav cam coordinates -// (via time interpolation) then to world coordinates, then by -// projecting into the nav cam image having a match point with the -// original haz cam image. This is a little tricky to follow, because -// each haz cam image is bound in time by two nav cam images. 
-struct DepthToNavError { + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class RefCamError + +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point. The +// depth point needs to be transformed to world coordinates first. For +// that one has to do pose interpolation. +struct BracketedDepthError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - DepthToNavError(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, - double alpha, // used for interpolation - bool match_left, std::vector const& block_sizes, camera::CameraParameters const& nav_cam_params) - : m_nav_pix(nav_pix), - m_depth_xyz(depth_xyz), - m_alpha(alpha), - m_match_left(match_left), - m_block_sizes(block_sizes), - m_nav_cam_params(nav_cam_params) { - // Sanity check. - if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; + BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 7 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_XYZ_PARAMS || + m_block_sizes[6] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthError: The block sizes were not set up 
properly.\n"; } } // Call to work with ceres::DynamicNumericDiffCostFunction. bool operator()(double const* const* parameters, double* residuals) const { - // As mentioned before, we have to deal with four transforms - - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[6][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); - // The haz cam depth to image transform, which has a fixed scale - Eigen::Affine3d hazcam_depth_to_image_trans; - array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[3]); + // Apply the scale double depth_to_image_scale = parameters[4][0]; - hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; - - // Convert to nav cam coordinates - X = hazcam_to_navcam_trans * X; + depth_to_image.linear() *= depth_to_image_scale; - // The haz cam to nav cam transform at the haz cam time is obtained - // by interpolation in time - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image 
* m_meas_depth_xyz; // Convert to world coordinates - X = interp_world_to_nav_trans.inverse() * X; - - // Transform to either the left or right nav camera coordinates, - // depending on for which one we managed to find a match with he - // haz cam image - if (m_match_left) { - X = left_nav_trans * X; - } else { - X = right_nav_trans * X; - } + M = world_to_cam_trans.inverse() * M; - // Project into the image - Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_nav_cam_params.Convert(undist_pix, &dist_pix); + // Triangulated world point + Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); // Compute the residuals - residuals[0] = undist_pix[0] - m_nav_pix[0]; - residuals[1] = undist_pix[1] - m_nav_pix[1]; + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + } return true; } // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, double alpha, - bool match_left, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToNavError(nav_pix, depth_xyz, alpha, match_left, block_sizes, nav_cam_params)); + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_PIX_PARAMS); + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { + for (size_t i = 0; i < block_sizes.size(); i++) cost_function->AddParameterBlock(block_sizes[i]); - } + return cost_function; } private: - Eigen::Vector2d m_nav_pix; // The nav cam pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera - double m_alpha; - bool m_match_left; + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_nav_cam_params; -}; // End class DepthToNavError +}; // End class BracketedDepthError -// An error function minimizing the error of transforming depth points at haz cam -// time through enough coordinate systems until it can project -// into the sci cam at the sci cam time -struct DepthToSciError { +// An error function minimizing the product of a given weight and the +// error between a mesh point and a transformed measured depth point. The +// depth point needs to be transformed to world coordinates first. For +// that one has to do pose interpolation. +struct BracketedDepthMeshError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - DepthToSciError(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, double alpha_haz, - double alpha_sci, // used for interpolation - std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) - : m_dist_sci_pix(dist_sci_pix), - m_depth_xyz(depth_xyz), - m_alpha_haz(alpha_haz), - m_alpha_sci(alpha_sci), - m_block_sizes(block_sizes), - m_sci_cam_params(sci_cam_params), - m_num_focal_lengths(1) { - // Sanity check. 
- if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || - m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || - m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { - LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; + BracketedDepthMeshError(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_mesh_xyz(mesh_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 6 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthMeshError: The block sizes were not set up properly.\n"; } } // Call to work with ceres::DynamicNumericDiffCostFunction. 
bool operator()(double const* const* parameters, double* residuals) const { - // Make a deep copy which we will modify - camera::CameraParameters sci_cam_params = m_sci_cam_params; - - // We have to deal with four transforms - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); - - Eigen::Affine3d scicam_to_hazcam_trans; - array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); - - // The haz cam depth to image transform, which has a fixed scale - Eigen::Affine3d hazcam_depth_to_image_trans; - array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[4]); - double depth_to_image_scale = parameters[5][0]; - hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; - - // Intrinsics, including a single focal length - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[6][0], parameters[6][0]); - Eigen::Vector2d optical_center(parameters[7][0], parameters[7][1]); - Eigen::VectorXd distortion(m_block_sizes[8]); - for (int i = 0; i < m_block_sizes[8]; i++) distortion[i] = parameters[8][i]; - sci_cam_params.SetFocalLength(focal_vector); - sci_cam_params.SetOpticalOffset(optical_center); - sci_cam_params.SetDistortion(distortion); - - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; - - // Convert to nav cam coordinates at haz cam time - X = hazcam_to_navcam_trans * X; + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[5][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point 
cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); - // World to navcam at haz time - Eigen::Affine3d interp_world_to_nav_trans_haz_time = - dense_map::linearInterp(m_alpha_haz, left_nav_trans, right_nav_trans); + // Apply the scale + double depth_to_image_scale = parameters[4][0]; + depth_to_image.linear() *= depth_to_image_scale; - // World to nav time at sci time - Eigen::Affine3d interp_world_to_nav_trans_sci_time = - dense_map::linearInterp(m_alpha_sci, left_nav_trans, right_nav_trans); + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; // Convert to world coordinates - X = interp_world_to_nav_trans_haz_time.inverse() * X; - - // Convert to nav coordinates at sci cam time - X = interp_world_to_nav_trans_sci_time * X; - - // Convert to sci cam coordinates - X = scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * X; - - // Convert to sci cam pix - Eigen::Vector2d undist_pix = sci_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - - // Apply distortion - Eigen::Vector2d comp_dist_sci_pix; - sci_cam_params.Convert(undist_pix, &comp_dist_sci_pix); + M = world_to_cam_trans.inverse() * M; // Compute the residuals - residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; - residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (m_mesh_xyz[it] - M[it]); + } return true; } // Factory to hide the construction of the CostFunction object from the client code. 
- static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, - double alpha_haz, double alpha_sci, std::vector const& block_sizes, - camera::CameraParameters const& sci_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToSciError(dist_sci_pix, depth_xyz, alpha_haz, alpha_sci, block_sizes, sci_cam_params)); + static ceres::CostFunction* Create(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthMeshError(weight, meas_depth_xyz, mesh_xyz, + left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_PIX_PARAMS); + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - // The camera wrapper knows all of the block sizes to add. 
- for (size_t i = 0; i < block_sizes.size(); i++) { + for (size_t i = 0; i < block_sizes.size(); i++) cost_function->AddParameterBlock(block_sizes[i]); - } + return cost_function; } private: - Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera - double m_alpha_haz; - double m_alpha_sci; + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + Eigen::Vector3d m_mesh_xyz; // Point on preexisting mesh + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_sci_cam_params; - int m_num_focal_lengths; -}; // End class DepthToSciError +}; // End class BracketedDepthMeshError -// An error function projecting an xyz point in the sci cam -// bounded by two nav cams. -struct SciError { +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point for +// the ref camera. The depth point needs to be transformed to world +// coordinates first. +struct RefDepthError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - SciError(Eigen::Vector2d const& dist_sci_pix, - double alpha, // used for interpolation - std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) - : m_dist_sci_pix(dist_sci_pix), - m_alpha(alpha), - m_block_sizes(block_sizes), - m_sci_cam_params(sci_cam_params), - m_num_focal_lengths(1) { - // Sanity check. 
- if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_XYZ_PARAMS || m_block_sizes[5] != m_num_focal_lengths || - m_block_sizes[6] != NUM_OPT_CTR_PARAMS || m_block_sizes[7] != m_sci_cam_params.GetDistortion().size()) { - LOG(FATAL) << "SciError: The block sizes were not set up properly.\n"; + RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 4 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + (m_block_sizes[1] != NUM_RIGID_PARAMS && m_block_sizes[1] != NUM_AFFINE_PARAMS) || + m_block_sizes[2] != NUM_SCALAR_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS) { + LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; } } // Call to work with ceres::DynamicNumericDiffCostFunction. 
bool operator()(double const* const* parameters, double* residuals) const { - // Make a deep copy which we will modify - camera::CameraParameters sci_cam_params = m_sci_cam_params; - - // We have to deal with four transforms - - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + // Current world to camera transform + Eigen::Affine3d world_to_cam; + array_to_rigid_transform(world_to_cam, parameters[0]); - Eigen::Affine3d scicam_to_hazcam_trans; - array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); - - Eigen::Vector3d X; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[4][it]; + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[1] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[1]); + else + array_to_rigid_transform(depth_to_image, parameters[1]); - // Intrinsics, including a single focal length - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); - Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); - Eigen::VectorXd distortion(m_block_sizes[7]); - for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; - sci_cam_params.SetFocalLength(focal_vector); - sci_cam_params.SetOpticalOffset(optical_center); - sci_cam_params.SetDistortion(distortion); + // Apply the scale + double depth_to_image_scale = parameters[2][0]; + depth_to_image.linear() *= depth_to_image_scale; - // Find the sci cam to world transform - Eigen::Affine3d interp_world_to_sci_trans = - scicam_to_hazcam_trans.inverse() * 
hazcam_to_navcam_trans.inverse() * interp_world_to_nav_trans; + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - // Project into the sci cam - Eigen::Vector3d sciX = interp_world_to_sci_trans * X; - Eigen::Vector2d undist_sci_pix = sci_cam_params.GetFocalVector().cwiseProduct(sciX.hnormalized()); + // Convert to world coordinates + M = world_to_cam.inverse() * M; - // Convert to distorted pixel - Eigen::Vector2d comp_dist_sci_pix; - sci_cam_params.Convert(undist_sci_pix, &comp_dist_sci_pix); + // Triangulated world point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); // Compute the residuals - residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; - residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + } return true; } // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, double alpha, - std::vector const& block_sizes, - camera::CameraParameters const& sci_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new SciError(dist_sci_pix, alpha, block_sizes, sci_cam_params)); + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefDepthError(weight, meas_depth_xyz, block_sizes)); - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_PIX_PARAMS); + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - // The camera wrapper knows all of the block sizes to add. 
- for (size_t i = 0; i < block_sizes.size(); i++) { + for (size_t i = 0; i < block_sizes.size(); i++) cost_function->AddParameterBlock(block_sizes[i]); - } + return cost_function; } private: - Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation - double m_alpha; + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement std::vector m_block_sizes; - camera::CameraParameters m_sci_cam_params; - int m_num_focal_lengths; -}; // End class SciError +}; // End class RefDepthError // An error function minimizing a weight times the distance from a // variable xyz point to a fixed reference xyz point. @@ -662,7 +785,8 @@ struct XYZError { // TODO(oalexan1): May want to use the analytical Ceres cost function bool operator()(double const* const* parameters, double* residuals) const { // Compute the residuals - for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + for (int it = 0; it < NUM_XYZ_PARAMS; it++) + residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); return true; } @@ -672,7 +796,8 @@ struct XYZError { std::vector const& block_sizes, double weight) { ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + new ceres::DynamicNumericDiffCostFunction + (new XYZError(ref_xyz, block_sizes, weight)); // The residual size is always the same cost_function->SetNumResiduals(NUM_XYZ_PARAMS); @@ -690,8 +815,6 @@ struct XYZError { double m_weight; }; // End class XYZError -enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; - // Calculate the rmse residual for each residual type. 
void calc_median_residuals(std::vector const& residuals, std::vector const& residual_names, @@ -702,12 +825,13 @@ void calc_median_residuals(std::vector const& residuals, LOG(FATAL) << "There must be as many residuals as residual names."; std::map > stats; - for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]] = std::vector(); // initialize + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]] = std::vector(); // initialize for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]].push_back(std::abs(residuals[it])); - std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + std::cout << "The 25, 50, 75, and 100th percentile residual stats " << tag << std::endl; for (auto it = stats.begin(); it != stats.end(); it++) { std::string const& name = it->first; std::vector vals = stats[name]; // make a copy @@ -716,20 +840,213 @@ void calc_median_residuals(std::vector const& residuals, int len = vals.size(); int it1 = static_cast(0.25 * len); - int it2 = static_cast(0.5 * len); + int it2 = static_cast(0.50 * len); int it3 = static_cast(0.75 * len); + int it4 = static_cast(len - 1); - if (len = 0) - std::cout << name << ": " - << "none" << std::endl; + if (len == 0) + std::cout << name << ": " << "none"; else - std::cout << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3] << std::endl; + std::cout << std::setprecision(8) + << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' + << vals[it3] << ' ' << vals[it4]; + std::cout << " (" << len << " residuals)" << std::endl; } } + typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + + // A class to encompass all known information about a camera + // This is work in progress and will replace some of the logic further down. + struct cameraImage { + // An index to look up the type of camera. This will equal the + // value ref_camera_type if and only if this is a reference + // camera. 
+ int camera_type; + + // The timestamp for this camera (in floating point seconds since epoch) + // measured by the clock/cpu which is particular to this camera. + double timestamp; + + // The timestamp with an adjustment added to it to be in + // reference camera time + double ref_timestamp; + + // The timestamp of the closest cloud for this image, measured + // with the same clock as the 'timestamp' value. + double cloud_timestamp; + + // Indices to look up the reference cameras bracketing this camera + // in time. The two indices will have same value if and only if + // this is a reference camera. + int beg_ref_index; + int end_ref_index; + + // The image for this camera, in grayscale + cv::Mat image; + + // The corresponding depth cloud, for an image + depth camera. + // Will be empty and uninitialized for a camera lacking depth. + cv::Mat depth_cloud; + }; + + // Sort by timestamps adjusted to be relative to the ref camera clock + bool timestampLess(cameraImage i, cameraImage j) { + return (i.ref_timestamp < j.ref_timestamp); + } + + // Find the haz cam depth measurement. Use nearest neighbor interpolation + // to look into the depth cloud. 
+ bool depthValue( // Inputs + cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, + // Output + Eigen::Vector3d& depth_xyz) { + depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize + + if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud + + int col = round(dist_ip[0]); + int row = round(dist_ip[1]); + + if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) + LOG(FATAL) << "Out of range in depth cloud."; + + // After rounding one may hit the bound + if (col == depth_cloud.cols || row == depth_cloud.rows) + return false; + + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) + return false; + + depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + + return true; + } + + // Project given images with optimized cameras onto mesh. It is + // assumed that the most up-to-date cameras were copied/interpolated + // form the optimizer structures into the world_to_cam vector. 
+ void meshProjectCameras(std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& cam_images, + std::vector const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + int ref_camera_type, int nav_cam_num_exclude_boundary_pixels, + std::string const& out_dir) { + if (cam_names.size() != cam_params.size()) + LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n"; + if (cam_images.size() != world_to_cam.size()) + LOG(FATAL) << "There must be as many camera images as camera poses.\n"; + if (out_dir.empty()) + LOG(FATAL) << "The output directory is empty.\n"; + + char filename_buffer[1000]; + + for (size_t cid = 0; cid < cam_images.size(); cid++) { + double timestamp = cam_images[cid].timestamp; + int cam_type = cam_images[cid].camera_type; + + int num_exclude_boundary_pixels = 0; + if (cam_type == ref_camera_type) + num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels; + + // Must use the 10.7f format for the timestamp as everywhere else in the code + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s", + out_dir.c_str(), timestamp, cam_names[cam_type].c_str()); + std::string out_prefix = filename_buffer; // convert to string + + std::cout << "Creating texture for: " << out_prefix << std::endl; + meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type], + num_exclude_boundary_pixels, out_prefix); + } + } + + // Rebuild the map. + // TODO(oalexan1): This must be integrated in astrobee. + void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data + camera::CameraParameters const& cam_params, + boost::shared_ptr sparse_map) { + std::string rebuild_detector = "SURF"; + std::cout << "Rebuilding map with " << rebuild_detector << " detector."; + + // Set programatically the command line option for astrobee's map + // building min angle based on the corresponding refiner flag. 
+ std::ostringstream oss; + oss << FLAGS_refiner_min_angle; + std::string min_valid_angle = oss.str(); + google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); + if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) + LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " + << "map-building software.\n"; + // The newline below is due to the sparse map software not putting a newline + std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; + + // Copy some data to make sure it does not get lost on resetting things below + std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; + std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; + + // Ensure the new camera parameters are set + sparse_map->SetCameraParameters(cam_params); + + std::cout << "Detecting features."; + sparse_map->DetectFeatures(); + + std::cout << "Matching features."; + // Borrow from the original map which images should be matched with which. + sparse_map->cid_to_cid_.clear(); + for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { + std::map const& track = pid_to_cid_fid[p]; // alias + for (std::map::const_iterator it1 = track.begin(); + it1 != track.end() ; it1++) { + for (std::map::const_iterator it2 = it1; + it2 != track.end() ; it2++) { + if (it1->first != it2->first) { + // Never match an image with itself + sparse_map->cid_to_cid_[it1->first].insert(it2->first); + } + } + } + } + + sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), + sparse_mapping::MatchesFile(map_file), sparse_map.get()); + for (size_t i = 0; i < world_to_ref_t.size(); i++) + sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); + + // Wipe file that is no longer needed + try { + std::remove(sparse_mapping::EssentialFile(map_file).c_str()); + }catch(...) 
{} + + std::cout << "Building tracks."; + bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff + sparse_mapping::BuildTracks(rm_invalid_xyz, + sparse_mapping::MatchesFile(map_file), + sparse_map.get()); + + // It is essential that during re-building we do not vary the + // cameras. Those were usually computed with a lot of SURF features, + // while rebuilding is usually done with many fewer ORGBRISK + // features. + bool fix_cameras = true; + if (fix_cameras) + std::cout << "Performing bundle adjustment with fixed cameras."; + else + std::cout << "Performing bundle adjustment while floating cameras."; + + sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); + } + + // TODO(oalexan1): Move this to utils. // Intersect ray with mesh. Return true on success. - bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, + bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, double min_ray_dist, double max_ray_dist, // Output @@ -766,1453 +1083,1615 @@ void calc_median_residuals(std::vector const& residuals, return false; } - // Prevent the linter from messing up with the beautiful formatting below - void add_haz_nav_cost // NOLINT - (// Inputs // NOLINT - int haz_it, int nav_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & haz_cam_intensity_timestamps, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::map const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map const & haz_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - 
std::vector const & depth_to_nav_block_sizes, // NOLINT - std::vector const & depth_to_haz_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & hazcam_depth_to_image_vec, // NOLINT - ceres::Problem & problem) { // NOLINT - // Figure out the two nav cam indices bounding the current haz cam - // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp - // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset. - bool match_left = false; - if (haz_cam_intensity_timestamps[haz_it] >= - sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { - match_left = true; - } else { - match_left = false; // match right then + // Compute the transforms from the world to every camera, using pose interpolation + // if necessary. 
+ void calc_world_to_cam_transforms( // Inputs + std::vector const& cams, std::vector const& world_to_ref_vec, + std::vector const& ref_timestamps, std::vector const& ref_to_cam_vec, + std::vector const& ref_to_cam_timestamp_offsets, + // Output + std::vector& world_to_cam) { + if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) + LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; + if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) + LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; + + world_to_cam.resize(cams.size()); + + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); } + return; + } - auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); - auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); - if (left_it == haz_cam_to_left_nav_cam_index.end() || - right_it == haz_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error in add_haz_nav_cost."; + // Match features while assuming that the input cameras can be used to filter out + // outliers by reprojection error. 
+ void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, + camera::CameraParameters const& left_params, + camera::CameraParameters const& right_params, + Eigen::Affine3d const& left_world_to_cam, + Eigen::Affine3d const& right_world_to_cam, + double reprojection_error, + cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, + bool verbose, + // output + MATCH_PAIR* matches) { + // Match by using descriptors first + std::vector cv_matches; + interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); + + // Do filtering + std::vector left_vec; + std::vector right_vec; + std::vector filtered_cv_matches; + for (size_t j = 0; j < cv_matches.size(); j++) { + int left_ip_index = cv_matches.at(j).queryIdx; + int right_ip_index = cv_matches.at(j).trainIdx; + + Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1]); + + Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1]); + + Eigen::Vector2d undist_left_ip; + Eigen::Vector2d undist_right_ip; + left_params.Convert + (dist_left_ip, &undist_left_ip); + right_params.Convert + (dist_right_ip, &undist_right_ip); - int left_nav_it = left_it->second; - int right_nav_it = right_it->second; + Eigen::Vector3d X = + dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), + left_world_to_cam, right_world_to_cam, + undist_left_ip, undist_right_ip); + + // Project back into the cameras + Eigen::Vector3d left_cam_X = left_world_to_cam * X; + Eigen::Vector2d undist_left_pix + = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); + Eigen::Vector2d dist_left_pix; + left_params.Convert(undist_left_pix, + &dist_left_pix); + + Eigen::Vector3d right_cam_X = right_world_to_cam * X; + Eigen::Vector2d undist_right_pix + = 
right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); + Eigen::Vector2d dist_right_pix; + right_params.Convert(undist_right_pix, + &dist_right_pix); + + // Filter out points whose reprojection error is too big + bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && + (dist_right_ip - dist_right_pix).norm() <= reprojection_error); + + // If any values above are Inf or NaN, is_good will be false as well + if (!is_good) continue; + + // Get the keypoints from the good matches + left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1])); + right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1])); + + filtered_cv_matches.push_back(cv_matches[j]); + } - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || - haz_it >= static_cast(haz_cam_intensity_timestamps.size())) - LOG(FATAL) << "Book-keeping error 2."; + if (left_vec.empty()) return; - // Left and right nav cam image time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] - + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] - + navcam_to_hazcam_timestamp_offset; - double haz_time = haz_cam_intensity_timestamps[haz_it]; + // Filter using geometry constraints + // These may need some tweaking but works reasonably well. 
+ double ransacReprojThreshold = 20.0; + cv::Mat inlier_mask; + int maxIters = 10000; + double confidence = 0.8; - bool good = (left_time <= haz_time && haz_time < right_time); + // affine2D works better than homography + // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); - if (!good) LOG(FATAL) << "Book-keeping error 3."; + std::vector left_ip, right_ip; + for (size_t j = 0; j < filtered_cv_matches.size(); j++) { + int left_ip_index = filtered_cv_matches.at(j).queryIdx; + int right_ip_index = filtered_cv_matches.at(j).trainIdx; - // The current nav it better be either the left or right kind - if (nav_it != left_nav_it && nav_it != right_nav_it) LOG(FATAL) << "Book-keeping error 4."; + if (inlier_mask.at(j, 0) == 0) continue; - // Find the transform from the world to nav cam at the haz cam time - Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; - Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; - double alpha = (haz_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha = 0.0; // handle division by zero + cv::Mat left_desc = left_descriptors.row(left_ip_index); + cv::Mat right_desc = right_descriptors.row(right_ip_index); - if (!FLAGS_timestamp_interpolation) alpha = round(alpha); + InterestPoint left; + left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); - Eigen::Affine3d world_to_nav_trans_at_haz_time - = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + InterestPoint right; + right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); - std::vector const& haz_ip_vec = match_pair.first; // alias - std::vector const& nav_ip_vec = match_pair.second; // alias + left_ip.push_back(left); + right_ip.push_back(right); + } - 
cv::Mat const& depth_cloud = depth_clouds[haz_it]; // alias + // Update the shared variable using a lock + match_mutex->lock(); - for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. - int col = round(haz_ip_vec[ip_it].x); - int row = round(haz_ip_vec[ip_it].y); + // Print the verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; - if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) - LOG(FATAL) << "Book-keeping error 5."; + *matches = std::make_pair(left_ip, right_ip); + match_mutex->unlock(); + } - // Skip any point that goes out of bounds due to rounding - if (col == depth_cloud.cols || row == depth_cloud.rows) continue; + // Find if a given feature is an inlier, and take care to check that + // the bookkeeping is correct. 
+ int getMapValue(std::vector>> const& pid_cid_fid, + size_t pid, int cid, int fid) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; - cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - // Convert to Eigen - Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + return fid_it->second; + } - Eigen::Vector2d undist_haz_ip; - Eigen::Vector2d undist_nav_ip; - { - // Make sure we don't use the distorted pixels from now on - Eigen::Vector2d haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); - Eigen::Vector2d nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); - haz_cam_params.Convert(haz_ip, &undist_haz_ip); - nav_cam_params.Convert(nav_ip, &undist_nav_ip); - } + // Set a feature to be an outlier, and take care to check that + // the bookkeeping is correct. 
+ void setMapValue(std::vector>> & pid_cid_fid, + size_t pid, int cid, int fid, int val) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; - // Ensure the depth point projects well into the haz cam interest point - ceres::CostFunction* depth_to_haz_cost_function = - dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, - haz_cam_params); - ceres::LossFunction* depth_to_haz_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, - &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("haznavhaz1"); - residual_names.push_back("haznavhaz2"); - - // Ensure that the depth points projects well in the nav cam interest point - ceres::CostFunction* depth_to_nav_cost_function - = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left, - depth_to_nav_block_sizes, nav_cam_params); - ceres::LossFunction* depth_to_nav_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - int left_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it); - int right_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it); - problem.AddResidualBlock(depth_to_nav_cost_function, depth_to_nav_loss_function, - &nav_cam_vec[left_nav_index], - &nav_cam_vec[right_nav_index], - &hazcam_to_navcam_vec[0], &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("haznavnav1"); - residual_names.push_back("haznavnav2"); - - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[left_nav_index]); - problem.SetParameterBlockConstant(&nav_cam_vec[right_nav_index]); - } - } + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; - return; - } + auto& fid_map = cid_it->second; // alias 
+ auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - // Prevent the linter from messing up with the beautiful formatting below - void add_haz_sci_cost(// Inputs // NOLINT - int haz_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & haz_cam_intensity_timestamps, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::vector const & sci_cam_timestamps, // NOLINT - std::map const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map const & haz_cam_to_right_nav_cam_index, // NOLINT - std::map const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map const & sci_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - std::vector const & depth_to_sci_block_sizes, // NOLINT - std::vector const & depth_to_haz_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & scicam_to_hazcam_vec, // NOLINT - std::vector & hazcam_depth_to_image_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT - Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - ceres::Problem & problem) { // NOLINT - auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); - auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); - if (left_it == haz_cam_to_left_nav_cam_index.end() || - right_it == haz_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 1 in add_haz_sci_cost."; - - int left_nav_it = 
left_it->second; - int right_nav_it = right_it->second; - - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || - haz_it >= static_cast(haz_cam_intensity_timestamps.size())) - LOG(FATAL) << "Book-keeping error 2 in add_haz_sci_cost."; - - // The haz and sci images must be bracketed by the same two nav images - { - auto left_it2 = sci_cam_to_left_nav_cam_index.find(sci_it); - auto right_it2 = sci_cam_to_right_nav_cam_index.find(sci_it); - if (left_it2 == sci_cam_to_left_nav_cam_index.end() || - right_it2 == sci_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 3 in add_haz_sci_cost."; - - int left_nav_it2 = left_it2->second; - int right_nav_it2 = right_it2->second; - - if (left_nav_it2 != left_nav_it || right_nav_it2 != right_nav_it) - LOG(FATAL) << "Book-keeping error 4 in add_haz_sci_cost."; - } - // Left and right nav cam image time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] - + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] - + navcam_to_hazcam_timestamp_offset; - - // Find the haz and sci time and convert them to haz cam's clock - double haz_time = haz_cam_intensity_timestamps[haz_it]; - double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; - - bool good1 = (left_time <= haz_time && haz_time < right_time); - if (!good1) LOG(FATAL) << "Book-keeping error 5 in add_haz_sci_cost."; - - bool good2 = (left_time <= sci_time && sci_time < right_time); - if (!good2) LOG(FATAL) << "Book-keeping error 6 in add_haz_sci_cost."; - - // Find the transform from the world to nav cam at the haz cam time - Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; - Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; - - double alpha_haz = (haz_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha_haz = 0.0; 
// handle division by zero - - if (!FLAGS_timestamp_interpolation) alpha_haz = round(alpha_haz); - - Eigen::Affine3d world_to_nav_trans_at_haz_time = - dense_map::linearInterp(alpha_haz, left_nav_trans, right_nav_trans); - - double alpha_sci = (sci_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha_sci = 0.0; // handle division by zero - - if (!FLAGS_timestamp_interpolation) - alpha_sci = round(alpha_sci); - - Eigen::Affine3d world_to_nav_trans_at_sci_time = - dense_map::linearInterp(alpha_sci, left_nav_trans, right_nav_trans); - - std::vector const& haz_ip_vec = match_pair.first; - std::vector const& sci_ip_vec = match_pair.second; - - cv::Mat const& depth_cloud = depth_clouds[haz_it]; - - for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. - int col = round(haz_ip_vec[ip_it].x); - int row = round(haz_ip_vec[ip_it].y); - - if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) - LOG(FATAL) << "Book-keeping error 7 in add_haz_sci_cost."; - - // Skip any point that goes out of bounds due to rounding - if (col == depth_cloud.cols || row == depth_cloud.rows) continue; - - cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); - - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; - - // Convert to Eigen - Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); - - // Apply undistortion. Must take great care to not mix up - // distorted and undistorted pixels. 
- Eigen::Vector2d dist_haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); - Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); - Eigen::Vector2d undist_haz_ip; - Eigen::Vector2d undist_sci_ip; - haz_cam_params.Convert(dist_haz_ip, &undist_haz_ip); - sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); - - // Ensure the depth point projects well into the haz cam interest point - ceres::CostFunction* depth_to_haz_cost_function = - dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, haz_cam_params); - ceres::LossFunction* depth_to_haz_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("hazscihaz1"); - residual_names.push_back("hazscihaz2"); - - // Ensure that the depth points projects well in the sci cam interest point. - // Note how we pass a distorted sci cam pix, as in that error function we will - // take the difference of distorted pixels. 
- ceres::CostFunction* depth_to_sci_cost_function - = dense_map::DepthToSciError::Create(dist_sci_ip, depth_xyz, alpha_haz, - alpha_sci, depth_to_sci_block_sizes, sci_cam_params); - ceres::LossFunction* depth_to_sci_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_sci_cost_function, depth_to_sci_loss_function, - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], - &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale, &sci_cam_focal_vector[0], &sci_cam_optical_center[0], - &sci_cam_distortion[0]); - - residual_names.push_back("hazscisci1"); - residual_names.push_back("hazscisci2"); - - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); - } - } - - return; + fid_it->second = val; } - // Prevent the linter from messing up with the beautiful formatting below - void add_nav_sci_cost - (// Inputs // NOLINT - int nav_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::vector const & sci_cam_timestamps, // NOLINT - std::map const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map const & sci_cam_to_right_nav_cam_index, // NOLINT - Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT - Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - std::vector const & nav_block_sizes, // NOLINT - std::vector const & sci_block_sizes, // NOLINT - std::vector const & 
mesh_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - mve::TriangleMesh::Ptr const & mesh, // NOLINT - std::shared_ptr const & bvh_tree, // NOLINT - // Outputs // NOLINT - int & nav_sci_xyz_count, // NOLINT - std::vector & residual_names, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & scicam_to_hazcam_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT - Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - std::vector & initial_nav_sci_xyz, // NOLINT - std::vector & nav_sci_xyz, // NOLINT - ceres::Problem & problem) { // NOLINT - auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it); - auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it); - if (left_it == sci_cam_to_left_nav_cam_index.end() || - right_it == sci_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 1 in add_sci_sci_cost."; - - int left_nav_it = left_it->second; - int right_nav_it = right_it->second; - - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size())) - LOG(FATAL) << "Book-keeping error 1 in add_nav_sci_cost."; - - // Figure out the two nav cam indices bounding the current sci cam - bool match_left = false; - if (sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset >= - sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { - match_left = true; - } else { - match_left = false; // match right then - } +} // namespace dense_map - // Left and right nav cam image time, and sci cam time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + navcam_to_hazcam_timestamp_offset; - double sci_time = sci_cam_timestamps[sci_it] + 
scicam_to_hazcam_timestamp_offset; +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; - bool good = (left_time <= sci_time && sci_time < right_time); + std::cout.precision(17); // to be able to print timestamps - if (!good) LOG(FATAL) << "Book-keeping 2 in add_nav_sci_cost."; + if (FLAGS_ros_bag.empty()) + LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_sparse_map.empty()) + LOG(FATAL) << "The input sparse map was not specified."; - // Find the transform from the world to nav cam at the sci cam time - Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; - Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; - double alpha = (sci_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha = 0.0; // handle division by zero + if (FLAGS_output_map.empty()) + LOG(FATAL) << "The output sparse map was not specified."; - if (!FLAGS_timestamp_interpolation) alpha = round(alpha); + if (FLAGS_robust_threshold <= 0.0) + LOG(FATAL) << "The robust threshold must be positive.\n"; - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; - // Find the sci cam to world transform - Eigen::Affine3d interp_world_to_sci_trans = - scicam_to_hazcam_aff_trans.inverse() * - hazcam_to_navcam_aff_trans.inverse() * - interp_world_to_nav_trans; + if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; - Eigen::Affine3d world_to_nav_trans; - if (match_left) { - world_to_nav_trans = left_nav_trans; - } else { - world_to_nav_trans = right_nav_trans; - } + if (FLAGS_timestamp_offsets_max_change < 0) + LOG(FATAL) << "The timestamp offsets must be non-negative."; - std::vector const& sci_ip_vec = match_pair.first; - std::vector const& nav_ip_vec = match_pair.second; + if 
(FLAGS_refiner_min_angle <= 0.0) + LOG(FATAL) << "The min triangulation angle must be positive.\n"; - for (size_t ip_it = 0; ip_it < sci_ip_vec.size(); ip_it++) { - Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); - Eigen::Vector2d dist_nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); - Eigen::Vector2d undist_nav_ip; - Eigen::Vector2d undist_sci_ip; - nav_cam_params.Convert(dist_nav_ip, &undist_nav_ip); - sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); + if (FLAGS_depth_tri_weight < 0.0) + LOG(FATAL) << "The depth weight must non-negative.\n"; - Eigen::Vector3d X = - dense_map::TriangulatePair(sci_cam_params.GetFocalLength(), nav_cam_params.GetFocalLength(), - interp_world_to_sci_trans, world_to_nav_trans, undist_sci_ip, undist_nav_ip); - - bool have_mesh_intersection = false; - if (FLAGS_mesh != "") { - // Try to make the intersection point be on the mesh and the nav cam ray - // to make the sci cam to conform to that. - // TODO(oalexan1): Think more of the range of the ray below - double min_ray_dist = 0.0; - double max_ray_dist = 10.0; - Eigen::Vector3d intersection(0.0, 0.0, 0.0); - have_mesh_intersection - = dense_map::ray_mesh_intersect(undist_nav_ip, nav_cam_params, - world_to_nav_trans, mesh, bvh_tree, - min_ray_dist, max_ray_dist, - // Output - intersection); - - // Overwrite the triangulation result above with the intersection - if (have_mesh_intersection) X = intersection; - } + if (FLAGS_mesh_tri_weight < 0.0) + LOG(FATAL) << "The mesh weight must non-negative.\n"; - // Record the triangulated positions. These will be optimized. - for (int i = 0; i < dense_map::NUM_XYZ_PARAMS; i++) - nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count + i] = X[i]; + if (FLAGS_depth_mesh_weight < 0.0) + LOG(FATAL) << "The depth mesh weight must non-negative.\n"; - // A copy of the triangulated positions which won't be optimized. 
- initial_nav_sci_xyz[nav_sci_xyz_count] = X; + if (FLAGS_nav_cam_num_exclude_boundary_pixels < 0) + LOG(FATAL) << "Must have a non-negative value for --nav_cam_num_exclude_boundary_pixels.\n"; - // The cost function of projecting in the sci cam. Note that we use dist_sci_ip, - // as in the cost function below we will do differences of distorted sci cam pixels. - ceres::CostFunction* sci_cost_function = - dense_map::SciError::Create(dist_sci_ip, alpha, sci_block_sizes, sci_cam_params); - ceres::LossFunction* sci_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + if (FLAGS_nav_cam_distortion_replacement != "") { + if (FLAGS_haz_cam_intrinsics_to_float != "" || + FLAGS_sci_cam_intrinsics_to_float != "" || + FLAGS_extrinsics_to_float != "" || + FLAGS_float_sparse_map || + FLAGS_float_scale || + FLAGS_float_timestamp_offsets) + LOG(FATAL) << "If replacing and optimizing the nav_cam model distortion, the rest " + "of the variables must be kept fixed. Once this model is found and saved, " + "a subsequent call to this tool may do various co-optimizations."; + } - problem.AddResidualBlock(sci_cost_function, sci_loss_function, - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], - &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count], &sci_cam_focal_vector[0], - &sci_cam_optical_center[0], &sci_cam_distortion[0]); + if (FLAGS_registration && (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty())) + LOG(FATAL) << "In order to register the map, the hugin and xyz file must be specified."; + + // We assume our camera rig has n camera types. Each can be image or + // depth + image. Just one camera must be the reference camera. In + // this code it will be nav_cam. 
Every camera object (class + // cameraImage) knows its type (an index), which can be used to look + // up its intrinsics, image topic, depth topic (if present), + // ref_to_cam_timestamp_offset, and ref_to_cam_transform. A camera + // object also stores its image, depth cloud (if present), its + // timestamp, and indices pointing to its left and right ref + // bracketing cameras. + + // For every instance of a reference camera its + // ref_to_cam_timestamp_offset is 0 and kept fixed, + // ref_to_cam_transform is the identity and kept fixed, and the + // indices pointing to the left and right ref bracketing cameras are + // identical. + + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + + // Image and depth topics + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; - residual_names.push_back("navscisci1"); - residual_names.push_back("navscisci2"); + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + std::vector orig_cam_params = cam_params; - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); 
it++) { + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; } + } + } + + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - // The nav cam cost function - ceres::CostFunction* nav_cost_function = - dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); - ceres::LossFunction* nav_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + // If desired to process only specific timestamps + std::set sci_cam_timestamps_to_use; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps_to_use.insert(val); + } - problem.AddResidualBlock(nav_cost_function, nav_loss_function, - &nav_cam_vec[(nav_cam_start + nav_it) * NUM_RIGID_PARAMS], - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); - residual_names.push_back("navscinav1"); - residual_names.push_back("navscinav2"); + double haz_cam_depth_to_image_scale + = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); - if (FLAGS_fix_map) - problem.SetParameterBlockConstant(&nav_cam_vec[(nav_cam_start + nav_it) - * NUM_RIGID_PARAMS]); + // Separate the initial scale. This is convenient if + // haz_cam_depth_to_image is scale * rotation + translation and if + // it is desired to keep the scale fixed. In either case, the scale + // will be multiplied back when needed. 
+ Eigen::Affine3d haz_cam_normalized_depth_to_image = haz_cam_depth_to_image_transform; + haz_cam_normalized_depth_to_image.linear() /= haz_cam_depth_to_image_scale; - if (have_mesh_intersection) { - // Constrain the sci cam texture to agree with the nav cam texture on the mesh + // Read the sparse map + boost::shared_ptr sparse_map = + boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(initial_nav_sci_xyz[nav_sci_xyz_count], - mesh_block_sizes, FLAGS_mesh_weight); + // Deal with using a non-FOV model for nav_cam, if desired + Eigen::VectorXd nav_cam_distortion_replacement; + if (FLAGS_nav_cam_distortion_replacement != "") { + std::vector vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); + if (vec.size() != 4 && vec.size() != 5) + LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding" + << "to radial and tangential distortion coefficients.\n"; + nav_cam_distortion_replacement + = Eigen::Map(vec.data(), vec.size()); + } - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_mesh_robust_threshold); + if (FLAGS_refiner_skip_filtering && + (cam_params[ref_cam_type].GetDistortion().size() > 1 || + nav_cam_distortion_replacement.size() != 0)) + LOG(FATAL) << "Must use outlier filtering if a non-fisheye lens distortion is used" + << "with nav_cam, as it is hard to to fit such a model at the image periphery.\n"; - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); + // TODO(oalexan1): All this timestamp reading logic below should be in a function - residual_names.push_back("mesh_x"); - residual_names.push_back("mesh_y"); - residual_names.push_back("mesh_z"); - } + // Parse the ref timestamps from the sparse map + // We assume the sparse map image names are the timestamps. 
+ std::vector ref_timestamps; + const std::vector& sparse_map_images = sparse_map->cid_to_filename_; + ref_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + ref_timestamps[cid] = timestamp; + } + if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - // Very important, go forward in the vector of xyz. Do it at the end. - nav_sci_xyz_count++; + // Will optimize the nav cam poses as part of the process + std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias + + // Put transforms of the reference cameras in a vector. We will optimize them. + int num_ref_cams = world_to_ref_t.size(); + if (world_to_ref_t.size() != ref_timestamps.size()) + LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; + std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::rigid_transform_to_array(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + + std::vector> cam_timestamps_to_use = {std::set(), + std::set(), + sci_cam_timestamps_to_use}; + + // Which intrinsics from which cameras to float + std::vector> intrinsics_to_float(num_cam_types); + dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); + dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); + + std::string depth_to_image_name = "depth_to_image"; + std::set extrinsics_to_float; + dense_map::parse_extrinsics_to_float(cam_names, cam_names[ref_cam_type], depth_to_image_name, + FLAGS_extrinsics_to_float, extrinsics_to_float); + + if (FLAGS_float_scale && FLAGS_affine_depth_to_image) + LOG(FATAL) << "The options --float_scale and --affine_depth_to_image 
should not be used " + << "together. If the latter is used, the scale is always floated.\n"; + + if (!FLAGS_affine_depth_to_image && FLAGS_float_scale && + extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " + << "this is allowed as part of --extrinsics_to_float.\n"; + + if (FLAGS_nav_cam_distortion_replacement != "") { + if (intrinsics_to_float[ref_cam_type].find("distortion") + == intrinsics_to_float[ref_cam_type].end() || + intrinsics_to_float[ref_cam_type].size() != 1) { + LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " + << "distortion and no other nav cam intrinsics.\n"; } + } - return; + // Put in arrays, so we can optimize them + std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Set up the variable blocks to optimize for BracketedDepthError + int num_depth_params = dense_map::NUM_RIGID_PARAMS; + if (FLAGS_affine_depth_to_image) num_depth_params = dense_map::NUM_AFFINE_PARAMS; + + // Depth to image transforms and scales + std::vector normalized_depth_to_image; + std::vector depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); + normalized_depth_to_image.push_back(haz_cam_normalized_depth_to_image); + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); + + // Put in arrays, so we can optimize them + std::vector normalized_depth_to_image_vec(num_cam_types * num_depth_params); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::affine_transform_to_array + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * 
cam_type]); + else + dense_map::rigid_transform_to_array + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); } - void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { - int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam_params.GetDistortedSize()[0]; - int calib_image_rows = cam_params.GetDistortedSize()[1]; + // Put the intrinsics in arrays + std::vector focal_lengths(num_cam_types); + std::vector optical_centers(num_cam_types); + std::vector distortions(num_cam_types); + for (int it = 0; it < num_cam_types; it++) { + focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths + optical_centers[it] = cam_params[it].GetOpticalOffset(); + + if (cam_params[it].GetDistortion().size() == 0) + LOG(FATAL) << "The cameras are expected to have distortion."; + distortions[it] = cam_params[it].GetDistortion(); + } - int factor = raw_image_cols / calib_image_cols; + // Build a map for quick access for all the messages we may need + // TODO(oalexan1): Must the view be kept open for this to work? + std::vector topics; + for (auto it = 0; it < image_topics.size(); it++) + if (image_topics[it] != "") topics.push_back(image_topics[it]); + for (auto it = 0; it < depth_topics.size(); it++) + if (depth_topics[it] != "") topics.push_back(depth_topics[it]); + rosbag::Bag bag; + bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery(topics)); + std::map> bag_map; + dense_map::indexMessages(view, bag_map); + + // A lot of care is needed here. This remembers how we travel in time + // for each camera type so we have fewer messages to search. + // But if a mistake is done below it will mess up this bookkeeping. 
+ std::vector image_start_positions(num_cam_types, 0); + std::vector cloud_start_positions(num_cam_types, 0); + + // Cannot add a (positive) value more this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector upper_bound(num_cam_types, 1.0e+100); + + // Cannot add a (negative) value less than this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector lower_bound(num_cam_types, -1.0e+100); + + std::cout << "Bracketing the images in time." << std::endl; + + // Populate the data for each camera image + std::vector cams; + for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { + if (ref_cam_type != 0) + LOG(FATAL) << "It is assumed that the ref cam type is 0."; + + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + dense_map::cameraImage cam; + bool success = false; + if (cam_type == ref_cam_type) { + cam.camera_type = cam_type; + cam.timestamp = ref_timestamps[ref_it]; + cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it; + + // Search the whole set of timestamps, so set start_pos = + // 0. This is slower but more robust than keeping track of how + // we move in the increasing order of time. 
+ int start_pos = 0; + bool save_grayscale = true; // for matching we will need grayscale + double found_time = -1.0; + // this has to succeed since we picked the ref images in the map + if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, + // outputs + cam.image, image_start_positions[cam_type], // care here + found_time)) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // The exact time is expected + if (found_time != cam.timestamp) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + success = true; + + } else { + if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket + + // Convert the bracketing timestamps to current cam's time + double ref_to_cam_offset = ref_to_cam_timestamp_offsets[cam_type]; + double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_offset; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_offset; + + if (right_timestamp <= left_timestamp) + LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; + + if (right_timestamp - left_timestamp > FLAGS_bracket_len) + continue; // Must respect the bracket length + + // Find the image timestamp closest to the midpoint of the brackets. This will give + // more room to vary the timestamp later. + double mid_timestamp = (left_timestamp + right_timestamp)/2.0; + + // Search forward in time from image_start_positions[cam_type]. + // We will update that too later. One has to be very careful + // with it so it does not go too far forward in time + // so that at the next iteration we are passed what we + // search for. 
+ int start_pos = image_start_positions[cam_type]; // care here + bool save_grayscale = true; // for matching we will need grayscale + double curr_timestamp = left_timestamp; // start here + cv::Mat best_image; + double best_dist = 1.0e+100; + double best_time = -1.0, found_time = -1.0; + while (1) { + if (found_time >= right_timestamp) break; // out of range + + cv::Mat image; + if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], + save_grayscale, + // outputs + image, + start_pos, // care here + found_time)) + break; // Need not succeed, but then there's no need to go on are we are at the end + + double curr_dist = std::abs(found_time - mid_timestamp); + if (curr_dist < best_dist) { + best_dist = curr_dist; + best_time = found_time; + // Update the start position for the future only if this is a good + // solution. Otherwise we may have moved too far. + image_start_positions[cam_type] = start_pos; + image.copyTo(best_image); + } + + // Go forward in time. We count on the fact that + // lookupImage() looks forward from given guess. + curr_timestamp = std::nextafter(found_time, 1.01 * found_time); + } - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " - << calib_image_cols << ' ' << calib_image_rows << "\n" - << "These must be equal up to an integer factor.\n"; - } + if (best_time < 0.0) continue; // bracketing failed - if (factor != 1) { - // TODO(oalexan1): This kind of resizing may be creating aliased images. 
- cv::Mat local_image; - cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); - local_image.copyTo(image); - } + // Note how we allow best_time == left_timestamp if there's no other choice + if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck - // Check - if (image.cols != calib_image_cols || image.rows != calib_image_rows) - LOG(FATAL) << "Sci cam images have the wrong size."; - } + cam.camera_type = cam_type; + cam.timestamp = best_time; + cam.ref_timestamp = best_time - ref_to_cam_offset; + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it + 1; + cam.image = best_image; - void select_images_to_match(// Inputs // NOLINT - double haz_cam_start_time, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - std::vector const& sparse_map_timestamps, // NOLINT - std::vector const& all_haz_cam_inten_timestamps, // NOLINT - std::vector const& all_sci_cam_timestamps, // NOLINT - std::set const& sci_cam_timestamps_to_use, // NOLINT - dense_map::RosBagHandle const& nav_cam_handle, // NOLINT - dense_map::RosBagHandle const& sci_cam_handle, // NOLINT - dense_map::RosBagHandle const& haz_cam_points_handle, // NOLINT - dense_map::RosBagHandle const& haz_cam_intensity_handle, // NOLINT - camera::CameraParameters const& sci_cam_params, // NOLINT - // Outputs // NOLINT - int& nav_cam_start, // NOLINT - std::vector& cid_to_image_type, // NOLINT - std::vector& haz_cam_intensity_timestamps, // NOLINT - std::vector& sci_cam_timestamps, // NOLINT - std::vector& images, // NOLINT - std::vector& depth_clouds) { // NOLINT - // Wipe the outputs - nav_cam_start = -1; - cid_to_image_type.clear(); - haz_cam_intensity_timestamps.clear(); - sci_cam_timestamps.clear(); - images.clear(); - depth_clouds.clear(); - - bool stop_early = false; - double found_time = -1.0; - bool save_grayscale = true; // feature detection needs grayscale - - double 
navcam_to_scicam_timestamp_offset - = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - - // Use these to keep track where in the bags we are. After one - // traversal forward in time they need to be reset. - int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0; - - for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) { - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - // The case when we would like to start later. Note the second - // comparison after "&&". When FLAG_start is 0, we want to - // make sure if the first nav image from the bag is in the map - // we use it, so we don't skip it even if based on - // navcam_to_hazcam_timestamp_offset we should. - if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - < FLAGS_start + haz_cam_start_time && - sparse_map_timestamps[map_it] < FLAGS_start + haz_cam_start_time) - continue; + upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); + lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); + success = true; } - if (nav_cam_start < 0) nav_cam_start = map_it; - - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs, - save_grayscale, images.back(), - nav_cam_pos, found_time)) { - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n"; - } - cid_to_image_type.push_back(dense_map::NAV_CAM); - - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - // If we would like to end earlier, then save the last nav cam image so far - // and quit - if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset > - FLAGS_start + FLAGS_duration + haz_cam_start_time) { - stop_early = true; - break; - } + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + 
cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) { + std::cout << std::setprecision(17) << "For " << cam_names[cam_type] + << " skipping timestamp: " << cam.timestamp << std::endl; + continue; } - // Do not look up sci cam and haz cam images in time intervals bigger than this - if (std::abs(sparse_map_timestamps[map_it + 1] - sparse_map_timestamps[map_it]) > FLAGS_bracket_len) continue; - - // Append at most two haz cam images between consecutive sparse - // map timestamps, close to these sparse map timestamps. - std::vector local_haz_timestamps; - dense_map::pickTimestampsInBounds(all_haz_cam_inten_timestamps, - sparse_map_timestamps[map_it], - sparse_map_timestamps[map_it + 1], - -navcam_to_hazcam_timestamp_offset, - local_haz_timestamps); - - for (size_t samp_it = 0; samp_it < local_haz_timestamps.size(); samp_it++) { - haz_cam_intensity_timestamps.push_back(local_haz_timestamps[samp_it]); - - double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double haz_time = local_haz_timestamps[samp_it] - haz_cam_start_time; - double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - - std::cout << "nav_start haz nav_end times " - << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl; - std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - - // Read the image - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(haz_cam_intensity_timestamps.back(), - haz_cam_intensity_handle.bag_msgs, - save_grayscale, images.back(), haz_cam_intensity_pos, - found_time)) - LOG(FATAL) << "Cannot look up haz cam image at given time"; - cid_to_image_type.push_back(dense_map::HAZ_CAM); - - double cloud_time = -1.0; - depth_clouds.push_back(cv::Mat()); - if (!dense_map::lookupCloud(haz_cam_intensity_timestamps.back(), - haz_cam_points_handle.bag_msgs, - FLAGS_max_haz_cam_image_to_depth_timestamp_diff, 
- depth_clouds.back(), - haz_cam_cloud_pos, cloud_time)) { - // This need not succeed always - } + if (!success) continue; + + // This can be useful in checking if all the sci cams were bracketed successfully. + // std::cout << std::setprecision(17) << "For camera " + // << cam_names[cam_type] << " pick timestamp " + // << cam.timestamp << ".\n"; + + if (depth_topics[cam_type] != "") { + cam.cloud_timestamp = -1.0; // will change + cv::Mat cloud; + // Look up the closest cloud in time (either before or after cam.timestamp) + // This need not succeed. + dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, + // Outputs + cam.depth_cloud, + cloud_start_positions[cam_type], // care here + cam.cloud_timestamp); } - // Append at most two sci cam images between consecutive sparse - // map timestamps, close to these sparse map timestamps. - - std::vector local_sci_timestamps; - dense_map::pickTimestampsInBounds(all_sci_cam_timestamps, sparse_map_timestamps[map_it], - sparse_map_timestamps[map_it + 1], - -navcam_to_scicam_timestamp_offset, - local_sci_timestamps); - - // Append to the vector of sampled timestamps - for (size_t samp_it = 0; samp_it < local_sci_timestamps.size(); samp_it++) { - // See if to use only specified timestamps - if (!sci_cam_timestamps_to_use.empty() && - sci_cam_timestamps_to_use.find(local_sci_timestamps[samp_it]) == - sci_cam_timestamps_to_use.end()) - continue; + cams.push_back(cam); + } // end loop over camera types + } // end loop over ref images - sci_cam_timestamps.push_back(local_sci_timestamps[samp_it]); - - double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double sci_time = local_sci_timestamps[samp_it] + scicam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - std::cout << "nav_start sci nav_end 
times " - << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; - std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - - // Read the sci cam image, and perhaps adjust its size - images.push_back(cv::Mat()); - cv::Mat local_img; - if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs, - save_grayscale, local_img, - sci_cam_pos, found_time)) - LOG(FATAL) << "Cannot look up sci cam image at given time."; - adjustImageSize(sci_cam_params, local_img); - local_img.copyTo(images.back()); - - // Sanity check - Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize(); - if (images.back().cols != sci_cam_size[0] || images.back().rows != sci_cam_size[1]) - LOG(FATAL) << "Sci cam images have the wrong size."; - - cid_to_image_type.push_back(dense_map::SCI_CAM); - } - } // End iterating over nav cam timestamps - - // Add the last nav cam image from the map, unless we stopped early and this was done - if (!stop_early) { - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(sparse_map_timestamps.back(), nav_cam_handle.bag_msgs, - save_grayscale, images.back(), - nav_cam_pos, found_time)) - LOG(FATAL) << "Cannot look up nav cam image at given time."; - cid_to_image_type.push_back(dense_map::NAV_CAM); - } + std::cout << "Allowed timestamp offset range while respecting the brackets for given cameras:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here - if (images.size() > sparse_map_timestamps.size() + haz_cam_intensity_timestamps.size() - + sci_cam_timestamps.size()) - LOG(FATAL) << "Book-keeping error in select_images_to_match."; + // So far we had the relative change. Now add the actual offset to get the max allowed offset. 
+ lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - return; + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; } - void set_up_block_sizes(int num_scicam_focal_lengths, int num_scicam_distortions, - std::vector & depth_to_haz_block_sizes, - std::vector & nav_block_sizes, - std::vector & depth_to_nav_block_sizes, - std::vector & depth_to_sci_block_sizes, - std::vector & sci_block_sizes, - std::vector & mesh_block_sizes) { - // Wipe the outputs - depth_to_haz_block_sizes.clear(); - nav_block_sizes.clear(); - depth_to_nav_block_sizes.clear(); - depth_to_sci_block_sizes.clear(); - sci_block_sizes.clear(); - mesh_block_sizes.clear(); - - // Set up the variable blocks to optimize for DepthToHazError - depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for NavError - nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - nav_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // Set up the variable blocks to optimize for DepthToNavError - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for DepthToSciError - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - 
depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length - depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center - depth_to_sci_block_sizes.push_back(num_scicam_distortions); // distortion - - // Set up the variable blocks to optimize for SciError - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length - sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center - sci_block_sizes.push_back(num_scicam_distortions); // distortion - - // Set up the variable blocks to optimize for the mesh xyz error - mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + if (FLAGS_float_timestamp_offsets) { + std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + + lower_bound[cam_type] = std::max(lower_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + - FLAGS_timestamp_offsets_max_change); + + upper_bound[cam_type] = std::min(upper_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + + FLAGS_timestamp_offsets_max_change); + + // Tighten a bit to ensure we don't exceed things when we add + // and subtract timestamps later. Note that timestamps are + // measured in seconds and fractions of a second since epoch and + // can be quite large so precision loss can easily happen. 
+ lower_bound[cam_type] += 1.0e-5; + upper_bound[cam_type] -= 1.0e-5; + + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; + } } - typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + // The images from the bag may need to be resized to be the same + // size as in the calibration file. Sometimes the full-res images + // can be so blurry that interest point matching fails, hence the + // resizing. + for (size_t it = 0; it < cam_params.size(); it++) + dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); - // Wrapper to find the value of a const map at a given key - int mapVal(std::map const& map, int key) { - auto ptr = map.find(key); - if (ptr == map.end()) - LOG(FATAL) << "Cannot find map value for given index."; - return ptr->second; + // Sort by the timestamp in reference camera time. This is essential + // for matching each image to other images close in time. + std::sort(cams.begin(), cams.end(), dense_map::timestampLess); + + if (FLAGS_verbose) { + int count = 10000; + std::vector image_files; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cams[it].camera_type << ".jpg"; + std::string name = oss.str(); + std::cout << "Writing: " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } } - // Find nav images close in time. 
Match sci and haz images in between to each - // other and to these nave images - // TODO(oalexan1): Reword the above explanation - void detect_match_features(// Inputs // NOLINT - std::vector const& images, // NOLINT - std::vector const& cid_to_image_type, // NOLINT - // Outputs // NOLINT - std::map & image_to_nav_it, // NOLINT - std::map & image_to_haz_it, // NOLINT - std::map & image_to_sci_it, // NOLINT - std::map & haz_cam_to_left_nav_cam_index, // NOLINT - std::map & haz_cam_to_right_nav_cam_index, // NOLINT - std::map & sci_cam_to_left_nav_cam_index, // NOLINT - std::map & sci_cam_to_right_nav_cam_index, // NOLINT - MATCH_MAP & matches) { // NOLINT - // Wipe the outputs - image_to_nav_it.clear(); - image_to_haz_it.clear(); - image_to_sci_it.clear(); - haz_cam_to_left_nav_cam_index.clear(); - haz_cam_to_right_nav_cam_index.clear(); - sci_cam_to_left_nav_cam_index.clear(); - sci_cam_to_right_nav_cam_index.clear(); - matches.clear(); - - int nav_it = 0, haz_it = 0, sci_it = 0; - for (int image_it = 0; image_it < static_cast(images.size()); image_it++) { - if (cid_to_image_type[image_it] == dense_map::NAV_CAM) { - image_to_nav_it[image_it] = nav_it; - nav_it++; - } else if (cid_to_image_type[image_it] == dense_map::HAZ_CAM) { - image_to_haz_it[image_it] = haz_it; - haz_it++; - } else if (cid_to_image_type[image_it] == dense_map::SCI_CAM) { - image_to_sci_it[image_it] = sci_it; - sci_it++; - } + // Detect features using multiple threads. Too many threads may result + // in high memory usage. + int num_threads_val = std::min(8u, std::thread::hardware_concurrency()); + std::ostringstream oss; + oss << num_threads_val; + std::string num_threads = oss.str(); + google::SetCommandLineOption("num_threads", num_threads.c_str()); + if (!gflags::GetCommandLineOption("num_threads", &num_threads)) + LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; + std::cout << "Using " << num_threads << " threads for feature detection/matching." 
<< std::endl; + std::cout << "Note that this step uses a huge amount of memory." << std::endl; + + std::cout << "Detecting features." << std::endl; + + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(cams.size()); + cid_to_keypoint_map.resize(cams.size()); + { + // Make the threadpool go out of scope when not needed to not use up memory + ff_common::ThreadPool thread_pool; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, + &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); } + thread_pool.Join(); + } - std::vector > image_pairs; + dense_map::MATCH_MAP matches; - // Look at two neighboring nav images. Find left_img_it and - // right_img_it so cid_to_image_type for these are nav images. - for (int left_img_it = 0; left_img_it < static_cast(images.size()); left_img_it++) { - if (cid_to_image_type[left_img_it] != dense_map::NAV_CAM) continue; // Not nav cam + std::vector > image_pairs; + for (size_t it1 = 0; it1 < cams.size(); it1++) { + for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) { + image_pairs.push_back(std::make_pair(it1, it2)); + } + } - // now find right_img_it - int right_img_it = -1; - for (int local_it = left_img_it + 1; local_it < static_cast(images.size()); local_it++) { - if (cid_to_image_type[local_it] == dense_map::NAV_CAM) { - right_img_it = local_it; - break; - } + { + // The transform from the world to every camera + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + std::cout << "Matching features." 
<< std::endl; + ff_common::ThreadPool thread_pool; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + if (FLAGS_refiner_skip_filtering) { + thread_pool.AddTask(&dense_map::matchFeatures, + &match_mutex, + left_image_it, right_image_it, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + FLAGS_verbose, &matches[pair]); + } else { + thread_pool.AddTask(&dense_map::matchFeaturesWithCams, + &match_mutex, + left_image_it, right_image_it, + cam_params[cams[left_image_it].camera_type], + cam_params[cams[right_image_it].camera_type], + world_to_cam[left_image_it], + world_to_cam[right_image_it], + FLAGS_initial_max_reprojection_error, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + FLAGS_verbose, &matches[pair]); } + } + thread_pool.Join(); + } + cid_to_descriptor_map = std::vector(); // Wipe, takes memory - if (right_img_it < 0) continue; + // If feature A in image I matches feather B in image J, which matches feature C in image K, + // then (A, B, C) belong together into a track. Build such a track. 
- // Now look at sci and haz images in between - std::vector nav_cam_indices, haz_cam_indices, sci_cam_indices; + std::vector, int>> keypoint_map(cams.size()); - nav_cam_indices.push_back(left_img_it); - nav_cam_indices.push_back(right_img_it); + // Give all interest points in a given image a unique id + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias - for (int local_img_it = left_img_it + 1; local_img_it < right_img_it; local_img_it++) { - int left_nav_it = mapVal(image_to_nav_it, left_img_it); - int right_nav_it = mapVal(image_to_nav_it, right_img_it); + int left_index = index_pair.first; + int right_index = index_pair.second; - if (cid_to_image_type[local_img_it] == dense_map::HAZ_CAM) { - int haz_it = mapVal(image_to_haz_it, local_img_it); - haz_cam_indices.push_back(local_img_it); - haz_cam_to_left_nav_cam_index[haz_it] = left_nav_it; - haz_cam_to_right_nav_cam_index[haz_it] = right_nav_it; + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + keypoint_map[left_index][dist_left_ip] = 0; + keypoint_map[right_index][dist_right_ip] = 0; + } + } - } else if (cid_to_image_type[local_img_it] == dense_map::SCI_CAM) { - int sci_it = mapVal(image_to_sci_it, local_img_it); - sci_cam_indices.push_back(local_img_it); - sci_cam_to_left_nav_cam_index[image_to_sci_it[local_img_it]] = left_nav_it; - sci_cam_to_right_nav_cam_index[image_to_sci_it[local_img_it]] = right_nav_it; - } - } + // Give all interest points in a given image a unique id + // And put them in a vector with the id corresponding to the interest point + std::vector>> keypoint_vec(cams.size()); + for 
(size_t cam_it = 0; cam_it < cams.size(); cam_it++) { + int count = 0; + for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { + ip_it->second = count; + count++; + keypoint_vec[cam_it].push_back(ip_it->first); + } + } - // Match haz to nav - for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { - for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { - image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], nav_cam_indices[nav_it])); - } - } + openMVG::matching::PairWiseMatches match_map; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias - // Match haz to sci - for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { - for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { - image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], sci_cam_indices[sci_it])); - } - } + int left_index = index_pair.first; + int right_index = index_pair.second; - // Match sci to nav - for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { - for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { - image_pairs.push_back(std::make_pair(sci_cam_indices[sci_it], nav_cam_indices[nav_it])); - } - } - } + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; - // Detect features using multiple threads - std::vector cid_to_descriptor_map; - std::vector cid_to_keypoint_map; - cid_to_descriptor_map.resize(images.size()); - cid_to_keypoint_map.resize(images.size()); - ff_common::ThreadPool thread_pool1; - for (size_t it = 0; it < images.size(); it++) - thread_pool1.AddTask(&dense_map::detectFeatures, images[it], FLAGS_verbose, - &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); - thread_pool1.Join(); + std::vector mvg_matches; - // Create the matches among nav, haz, and sci 
images. Note that we - // have a starting and ending nav cam images, and match all the - // haz and sci images to these two nav cam images and to each - // other. + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - // Find the matches using multiple threads - ff_common::ThreadPool thread_pool2; - std::mutex match_mutex; - for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { - auto pair = image_pairs[pair_it]; - int left_image_it = pair.first, right_image_it = pair.second; - thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, left_image_it, right_image_it, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); + int left_id = keypoint_map[left_index][dist_left_ip]; + int right_id = keypoint_map[right_index][dist_right_ip]; + mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); } - thread_pool2.Join(); - - return; + match_map[index_pair] = mvg_matches; } -} // namespace dense_map -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; - std::cout.precision(17); // to be able to print timestamps + if (FLAGS_verbose) { + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; - if (FLAGS_ros_bag.empty()) - LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_sparse_map.empty()) - LOG(FATAL) << "The input sparse map was not specified."; + int left_index = index_pair.first; + int right_index = index_pair.second; - if (FLAGS_output_map.empty()) - LOG(FATAL) << "The output sparse map was not specified."; + std::ostringstream oss1; + oss1 << (10000 + 
left_index) << "_" << cams[left_index].camera_type; - if (!FLAGS_skip_registration) { - if (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty()) - LOG(FATAL) << "Either the hugin or xyz file was not specified."; - } + std::string left_stem = oss1.str(); + std::string left_image = left_stem + ".jpg"; - if (FLAGS_opt_map_only && FLAGS_fix_map) - LOG(FATAL) << "Cannot both float the sparse map and keep it fixed."; + std::ostringstream oss2; + oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; - if (FLAGS_robust_threshold <= 0.0) - LOG(FATAL) << "The robust threshold must be positive.\n"; + std::string right_stem = oss2.str(); + std::string right_image = right_stem + ".jpg"; - // Set up handles for reading data at given time stamp without - // searching through the whole bag each time. - dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); - dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); + std::string match_file = left_stem + "__" + right_stem + ".match"; - if (nav_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No nav cam images found."; - if (sci_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No sci cam images found."; - if (haz_cam_intensity_handle.bag_msgs.empty()) LOG(FATAL) << "No haz cam images found."; + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); + } + } - // Read the config file - std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - std::vector cam_params; - std::vector ref_to_cam_trans; - std::vector ref_to_cam_timestamp_offsets; - Eigen::Affine3d navcam_to_body_trans; - Eigen::Affine3d hazcam_depth_to_image_transform; - dense_map::readConfigFile( // Inputs - cam_names, 
"nav_cam_transform", "haz_cam_depth_to_image_transform", - // Outputs - cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans, - hazcam_depth_to_image_transform); + // TODO(oalexan1): De-allocate all data when no longer need as + // the matching logic takes a lot of time. + + // not needed anymore and take up a lot of RAM + matches.clear(); matches = dense_map::MATCH_MAP(); + keypoint_map.clear(); keypoint_map.shrink_to_fit(); + cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); + + std::vector > pid_to_cid_fid; + { + // Build tracks + // De-allocate these as soon as not needed to save memory + openMVG::tracks::TracksBuilder trackBuilder; + trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences + trackBuilder.Filter(); // Filter: Remove tracks that have conflict + // trackBuilder.ExportToStream(std::cout); + // Export tracks as a map (each entry is a sequence of imageId and featureIndex): + // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} + openMVG::tracks::STLMAPTracks map_tracks; + trackBuilder.ExportToSTL(map_tracks); + match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed + trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it + + if (map_tracks.empty()) + LOG(FATAL) << "No tracks left after filtering. 
Perhaps images are too dis-similar?\n"; + + size_t num_elems = map_tracks.size(); + // Populate the filtered tracks + pid_to_cid_fid.clear(); + pid_to_cid_fid.resize(num_elems); + size_t curr_id = 0; + for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { + for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { + pid_to_cid_fid[curr_id][itr2->first] = itr2->second; + } + curr_id++; + } + } - if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { - for (size_t it = 0; it < cam_names.size(); it++) { - if (cam_names[it] == "sci_cam") { - double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; - std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] - << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; - ref_to_cam_timestamp_offsets[it] = new_val; + // Set up the variable blocks to optimize for BracketedCamError + std::vector bracketed_cam_block_sizes; + int num_focal_lengths = 1; + int num_distortion_params = 1; // will be overwritten + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_cam_block_sizes.push_back(num_focal_lengths); + bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + bracketed_cam_block_sizes.push_back(num_distortion_params); // will be modified later + + // Set up the variable blocks to optimize for RefCamError + std::vector ref_cam_block_sizes; + ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + ref_cam_block_sizes.push_back(num_focal_lengths); + ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + ref_cam_block_sizes.push_back(num_distortion_params); // 
will be modified later + + // Set up variable blocks to optimize for BracketedDepthError + std::vector bracketed_depth_block_sizes; + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(num_depth_params); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for BracketedDepthMeshError + std::vector bracketed_depth_mesh_block_sizes; + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(num_depth_params); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for RefDepthError + std::vector ref_depth_block_sizes; + ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(num_depth_params); + ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + // Set up the variable blocks to optimize for the mesh xyz + std::vector mesh_block_sizes; + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + // For a given fid = pid_to_cid_fid[pid][cid], the value + // pid_cid_fid_inlier[pid][cid][fid] will be non-zero only if this + // pixel is an inlier. Originally all pixels are inliers. Once an + // inlier becomes an outlier, it never becomes an inlier again. 
+ std::vector>> pid_cid_fid_inlier; + if (!FLAGS_refiner_skip_filtering) { + pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + pid_cid_fid_inlier[pid][cid][fid] = 1; } } } - // Copy to structures expected by this tool - camera::CameraParameters nav_cam_params = cam_params[0]; - camera::CameraParameters haz_cam_params = cam_params[1]; - camera::CameraParameters sci_cam_params = cam_params[2]; - Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); - Eigen::Affine3d scicam_to_hazcam_aff_trans = - ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse(); - double navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; - double scicam_to_hazcam_timestamp_offset = - ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2]; + for (int pass = 0; pass < FLAGS_refiner_num_passes; pass++) { + std::cout << "\nOptimization pass " << pass + 1 << " / " << FLAGS_refiner_num_passes << "\n"; + + // The transforms from the world to all cameras must be updated + // given the current state of optimization + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + // Do multiview triangulation + std::vector xyz_vec(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + std::vector focal_length_vec; + std::vector world_to_cam_vec; + std::vector pix_vec; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Triangulate inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - if 
(FLAGS_mesh_weight <= 0.0 || FLAGS_mesh_robust_threshold <= 0.0) - LOG(FATAL) << "The mesh weight and robust threshold must be positive.\n"; + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cams[cid].camera_type == ref_cam_type) { + // Flag as outliers pixels at the nav_cam boundary, if desired. This + // is especially important when the nav_cam uses the radtan + // model instead of fisheye. + Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); + int excl = FLAGS_nav_cam_num_exclude_boundary_pixels; + if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || + dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + continue; + } + } - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; - std::shared_ptr bvh_tree; - if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() - << "\n"; -#endif - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } - // Read the sparse map - boost::shared_ptr sparse_map = - boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + if (!FLAGS_refiner_skip_filtering && pix_vec.size() < 2) { + // If after outlier filtering less than two 
rays are left, can't triangulate. + // Must set all features for this pid to outliers. + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } - // TODO(oalexan1): All this timestamp reading logic below should be in a function + // Nothing else to do + continue; + } - // Find the minimum and maximum timestamps in the sparse map - double min_map_timestamp = std::numeric_limits::max(); - double max_map_timestamp = -min_map_timestamp; - std::vector sparse_map_timestamps; - const std::vector& sparse_map_images = sparse_map->cid_to_filename_; - sparse_map_timestamps.resize(sparse_map_images.size()); - for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); - sparse_map_timestamps[cid] = timestamp; - min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); - max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]); - } - if (sparse_map_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - - // Read the haz cam timestamps - std::vector const& haz_cam_intensity_msgs - = haz_cam_intensity_handle.bag_msgs; - if (haz_cam_intensity_msgs.empty()) LOG(FATAL) << "No haz cam messages are present."; - double haz_cam_start_time = -1.0; - std::vector all_haz_cam_inten_timestamps; - for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { - sensor_msgs::Image::ConstPtr image_msg - = haz_cam_intensity_msgs[it].instantiate(); - if (image_msg) { - double haz_cam_time = image_msg->header.stamp.toSec(); - all_haz_cam_inten_timestamps.push_back(haz_cam_time); - if (haz_cam_start_time < 0) haz_cam_start_time = haz_cam_time; + // Triangulate n rays emanating from given undistorted and centered pixels + xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, 
pix_vec); } - } - // Read the sci cam timestamps from the bag - std::vector const& sci_cam_msgs = sci_cam_handle.bag_msgs; - std::vector all_sci_cam_timestamps; - for (size_t sci_it = 0; sci_it < sci_cam_msgs.size(); sci_it++) { - sensor_msgs::Image::ConstPtr sci_image_msg - = sci_cam_msgs[sci_it].instantiate(); - sensor_msgs::CompressedImage::ConstPtr comp_sci_image_msg = - sci_cam_msgs[sci_it].instantiate(); - if (sci_image_msg) - all_sci_cam_timestamps.push_back(sci_image_msg->header.stamp.toSec()); - else if (comp_sci_image_msg) - all_sci_cam_timestamps.push_back(comp_sci_image_msg->header.stamp.toSec()); - } + if (pass == 0 && nav_cam_distortion_replacement.size() > 1) { + // At the first pass, right after triangulation is done with a + // given nav cam model, which presumably was pretty accurate, + // replace its distortion if desired, which we will then + // optimize. + std::cout << "Setting nav cam distortion to: " << nav_cam_distortion_replacement.transpose() + << ". Will proceed to optimize it.\n"; + cam_params[ref_cam_type].SetDistortion(nav_cam_distortion_replacement); + distortions[ref_cam_type] = cam_params[ref_cam_type].GetDistortion(); + } - // If desired to process only specific timestamps - std::set sci_cam_timestamps_to_use; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - double val; - while (ifs >> val) sci_cam_timestamps_to_use.insert(val); - } + // For a given fid = pid_to_cid_fid[pid][cid], + // the value pid_cid_fid_to_residual[pid][cid][fid] will be the index in the array + // of residuals (look only at pixel residuals). This structure is populated only for + // inliers, so its size changes at each pass. 
+ std::vector>> pid_cid_fid_to_residual; + if (!FLAGS_refiner_skip_filtering) pid_cid_fid_to_residual.resize(pid_to_cid_fid.size()); + + // Form the problem + ceres::Problem problem; + std::vector residual_names; + std::vector residual_scales; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - // Will optimize the nav cam poses as part of the process - std::vector& nav_cam_affines = sparse_map->cid_to_cam_t_global_; // alias - - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - double latest_start - = sparse_map_timestamps.back() + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - if (latest_start < FLAGS_start) { - std::cout << "The sparse map ended before the desired start time. " - << "Use a start time no later than " << latest_start << "." << std::endl; - return 1; + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cam_type == ref_cam_type) { + // The cost function of projecting in the ref cam. 
+ ceres::CostFunction* ref_cost_function = + dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* ref_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to create + if (!FLAGS_refiner_skip_filtering) + pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (ref_cost_function, ref_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &xyz_vec[pid][0], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (FLAGS_depth_tri_weight == 0 || + !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // The constraint that the depth points agree with triangulated points + ceres::CostFunction* ref_depth_cost_function + = dense_map::RefDepthError::Create(FLAGS_depth_tri_weight, + depth_xyz, + ref_depth_block_sizes); + + ceres::LossFunction* ref_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_tri_x_m"); + residual_names.push_back("depth_tri_y_m"); + residual_names.push_back("depth_tri_z_m"); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + problem.AddResidualBlock + (ref_depth_cost_function, + ref_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + 
&normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0]); + + if (!FLAGS_float_sparse_map) + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + + } else { + // Other cameras, which need bracketing + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to create + if (!FLAGS_refiner_skip_filtering) + pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + 
if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (FLAGS_depth_tri_weight == 0 || + !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // Ensure that the depth points agree with triangulated points + ceres::CostFunction* bracketed_depth_cost_function + = dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, + depth_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_block_sizes); + + ceres::LossFunction* bracketed_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_tri_x_m"); + residual_names.push_back("depth_tri_y_m"); + residual_names.push_back("depth_tri_z_m"); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + problem.AddResidualBlock + (bracketed_depth_cost_function, + bracketed_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type]); + + // TODO(oalexan1): This code repeats too much. 
Need to keep a hash + // of sparse map pointers. + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + } + } } - double earliest_end = sparse_map_timestamps[0] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - if (earliest_end > FLAGS_start + FLAGS_duration) { - std::cout << "The sparse map did not start yet. The sum of start and duration " - << "must be at least " << earliest_end << "." 
<< std::endl; - return 1; - } - } + if (FLAGS_mesh != "") { + // Add the mesh constraint + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num_intersections = 0; // Number of mesh intersections given the rays for this pid + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!FLAGS_refiner_skip_filtering && + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + // Intersect the ray with the mesh + bool have_mesh_intersection = false; + Eigen::Vector3d mesh_intersect(0.0, 0.0, 0.0); + have_mesh_intersection + = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + FLAGS_min_ray_dist, FLAGS_max_ray_dist, + // Output + mesh_intersect); + + if (have_mesh_intersection) { + mesh_xyz += mesh_intersect; + num_intersections += 1; + } + + if (FLAGS_depth_mesh_weight > 0 && have_mesh_intersection) { + // Try to make each mesh intersection agree with corresponding depth measurement, + // if it exists + Eigen::Vector3d depth_xyz(0, 0, 0); + if (dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) { + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + if (cam_type != ref_cam_type) { + // TODO(oalexan1): What to do when cam_type == ref_cam_type? 
+ // Ensure that the depth points agree with mesh points + ceres::CostFunction* bracketed_depth_mesh_cost_function + = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, + depth_xyz, + mesh_intersect, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_mesh_block_sizes); + + ceres::LossFunction* bracketed_depth_mesh_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_mesh_x_m"); + residual_names.push_back("depth_mesh_y_m"); + residual_names.push_back("depth_mesh_z_m"); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + problem.AddResidualBlock + (bracketed_depth_mesh_cost_function, + bracketed_depth_mesh_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &ref_to_cam_timestamp_offsets[cam_type]); + + // TODO(oalexan1): This code repeats too much. Need to keep a hash + // of sparse map pointers. 
+ if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { + problem.SetParameterBlockConstant + (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + } + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + } + } + } + } - int nav_cam_start = -1; // Record the first nav cam image used - std::vector cid_to_image_type; - std::vector haz_cam_intensity_timestamps, sci_cam_timestamps; // timestamps we will use - std::vector images; - std::vector depth_clouds; - // Find nav images close in time. Match sci and haz images in between to each - // other and to these nave images - // TODO(oalexan1): This selects haz cam images outside of bracket. - // TODO(oalexan1): No check for bracket size either. 
- select_images_to_match( // Inputs // NOLINT - haz_cam_start_time, navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, // NOLINT - sparse_map_timestamps, all_haz_cam_inten_timestamps, // NOLINT - all_sci_cam_timestamps, sci_cam_timestamps_to_use, // NOLINT - nav_cam_handle, sci_cam_handle, haz_cam_points_handle, // NOLINT - haz_cam_intensity_handle, // NOLINT - sci_cam_params, // NOLINT - // Outputs // NOLINT - nav_cam_start, cid_to_image_type, // NOLINT - haz_cam_intensity_timestamps, sci_cam_timestamps, // NOLINT - images, depth_clouds); // NOLINT - - // If an image in the vector of images is of nav_cam type, see where its index is - // in the list of nav cam images - std::map image_to_nav_it, image_to_haz_it, image_to_sci_it; - - // Given the index of a haz cam or sci cam image, find the index of the left and right - // nav cam images bounding it in time. - std::map haz_cam_to_left_nav_cam_index, haz_cam_to_right_nav_cam_index; - std::map sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index; - - // Map haz cam and nav cam index to the vectors of haz cam to nav cam matches - dense_map::MATCH_MAP matches; + if (num_intersections >= 1 && FLAGS_mesh_tri_weight > 0) { + // Average the intersections of all rays with the mesh, and try to make + // the triangulated point agree with the mesh intersection - // TODO(oalexan1): Add explanation here - detect_match_features(// Inputs // NOLINT - images, cid_to_image_type, // NOLINT - // Outputs // NOLINT - image_to_nav_it, image_to_haz_it, // NOLINT - image_to_sci_it, // NOLINT - haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index, // NOLINT - sci_cam_to_left_nav_cam_index, // NOLINT - sci_cam_to_right_nav_cam_index, // NOLINT - matches); // NOLINT - - double hazcam_depth_to_image_scale = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); - - // Since we will keep the scale fixed, vary the part of the transform without - // the scale, while 
adding the scale each time before the transform is applied - Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; - hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; - - // Form the optimization vector for hazcam_depth_to_image_transform - std::vector hazcam_depth_to_image_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(hazcam_depth_to_image_noscale, &hazcam_depth_to_image_vec[0]); - - // Go back from the array to the original transform. This will make it into a pure - // scale*rotation + translation, removing the artifacts of it having been read - // from disk where it was stored with just a few digits of precision - dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; - - // Form the optimization vector for hazcam_to_navcam_aff_trans - std::vector hazcam_to_navcam_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - - // Recreate hazcam_to_navcam_aff_trans as above - dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - - // Form the optimization vector for scicam_to_hazcam_aff_trans - std::vector scicam_to_hazcam_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - - // Recreate scicam_to_hazcam_aff_trans as above - dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - - // See how many nav to sci matches we have and allocate space for their xyz - // TODO(oalexan1): This must be factored out as a function - int num_nav_sci_xyz = 0; - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair index_pair = it->first; - dense_map::MATCH_PAIR const& match_pair = it->second; + mesh_xyz /= num_intersections; - std::map::iterator 
image_haz_it, image_nav_it, image_sci_it; + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_tri_weight); - image_sci_it = image_to_sci_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_sci_it != image_to_sci_it.end() && image_nav_it != image_to_nav_it.end()) { - int sci_it = image_sci_it->second; - int nav_it = image_nav_it->second; + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - // Add to the number of xyz - num_nav_sci_xyz += match_pair.first.size(); + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); - if (FLAGS_verbose) saveImagesAndMatches("sci", "nav", index_pair, match_pair, images); - } + residual_names.push_back("mesh_tri_x_m"); + residual_names.push_back("mesh_tri_y_m"); + residual_names.push_back("mesh_tri_z_m"); - image_haz_it = image_to_haz_it.find(index_pair.first); - image_sci_it = image_to_sci_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { - if (FLAGS_verbose) saveImagesAndMatches("haz", "sci", index_pair, match_pair, images); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + } + } } - image_haz_it = image_to_haz_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { - if (FLAGS_verbose) saveImagesAndMatches("haz", "nav", index_pair, match_pair, images); + // See which intrinsics from which cam to float or keep fixed + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (intrinsics_to_float[cam_type].find("focal_length") == + intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating 
focal_length." << std::endl; + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; + } + if (intrinsics_to_float[cam_type].find("optical_center") == + intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << + // std::endl; + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; + } + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { + // std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + } else { + // std::cout << "For " << cam_names[cam_type] << " floating distortion." << std::endl; + } } - } - // Prepare for floating sci cam params - int num_scicam_focal_lengths = 1; // Same focal length in x and y - std::set sci_cam_intrinsics_to_float; - dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, sci_cam_intrinsics_to_float); - Eigen::Vector2d sci_cam_focal_vector = sci_cam_params.GetFocalVector(); - if (num_scicam_focal_lengths == 1) { - sci_cam_focal_vector[0] = sci_cam_params.GetFocalLength(); // average the two focal lengths - sci_cam_focal_vector[1] = sci_cam_focal_vector[0]; - } - std::cout << std::endl; - std::cout << "Initial focal length for sci cam: " << sci_cam_focal_vector.transpose() << std::endl; - Eigen::Vector2d sci_cam_optical_center = sci_cam_params.GetOpticalOffset(); - std::cout << "Initial optical center for sci_cam: " << sci_cam_optical_center.transpose() << std::endl; - Eigen::VectorXd sci_cam_distortion = sci_cam_params.GetDistortion(); - std::cout << "Initial distortion for sci_cam: " << sci_cam_distortion.transpose() << std::endl; - - std::vector depth_to_haz_block_sizes, nav_block_sizes; 
- std::vector depth_to_nav_block_sizes, depth_to_sci_block_sizes; - std::vector sci_block_sizes, mesh_block_sizes; - dense_map::set_up_block_sizes(// Inputs // NOLINT - num_scicam_focal_lengths, sci_cam_distortion.size(), // NOLINT - // Outputs // NOLINT - depth_to_haz_block_sizes, nav_block_sizes, // NOLINT - depth_to_nav_block_sizes, depth_to_sci_block_sizes, // NOLINT - sci_block_sizes, mesh_block_sizes); // NOLINT - - // Storage for the residual names and xyz - std::vector residual_names; - std::vector nav_sci_xyz(dense_map::NUM_XYZ_PARAMS * num_nav_sci_xyz, 0); - std::vector initial_nav_sci_xyz(num_nav_sci_xyz, Eigen::Vector3d(0, 0, 0)); - - // Put the sparse map camera transforms in a vector so we can optimize them further - int num_cams = nav_cam_affines.size(); - std::vector nav_cam_vec(num_cams * dense_map::NUM_RIGID_PARAMS); - for (int cid = 0; cid < num_cams; cid++) - dense_map::rigid_transform_to_array(nav_cam_affines[cid], - &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - int nav_sci_xyz_count = 0; - - ceres::Problem problem; - - // Form the problem - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias - dense_map::MATCH_PAIR const& match_pair = it->second; // alias + // Evaluate the residuals before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, 
residual_names, "before opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "Initial res " << residual_names[it] << " " << residuals[it] << std::endl; + } - std::map::iterator image_haz_it, image_nav_it, image_sci_it; - - // Doing haz cam to nav cam matches - image_haz_it = image_to_haz_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { - int haz_it = image_haz_it->second; - int nav_it = image_nav_it->second; - dense_map::add_haz_nav_cost(// Inputs // NOLINT - haz_it, nav_it, nav_cam_start, // NOLINT - navcam_to_hazcam_timestamp_offset, // NOLINT - match_pair, haz_cam_intensity_timestamps, // NOLINT - sparse_map_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index, nav_cam_params, // NOLINT - haz_cam_params, depth_to_nav_block_sizes, // NOLINT - depth_to_haz_block_sizes, // NOLINT - nav_cam_affines, depth_clouds, // NOLINT - // Outputs // NOLINT - residual_names, hazcam_depth_to_image_scale, // NOLINT - nav_cam_vec, hazcam_to_navcam_vec, // NOLINT - hazcam_depth_to_image_vec, // NOLINT - problem); // NOLINT + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // The optimization is done. Right away copy the optimized states to where they belong to + // keep all data in sync. 
+ + // Copy back the reference transforms + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::array_to_rigid_transform(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Copy back the optimized intrinsics + for (int it = 0; it < num_cam_types; it++) { + cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); + cam_params[it].SetOpticalOffset(optical_centers[it]); + cam_params[it].SetDistortion(distortions[it]); } - // Doing haz cam to sci cam matches - image_haz_it = image_to_haz_it.find(index_pair.first); - image_sci_it = image_to_sci_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { - int haz_it = image_haz_it->second; - int sci_it = image_sci_it->second; - add_haz_sci_cost( // Inputs // NOLINT - haz_it, sci_it, nav_cam_start, navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, match_pair, // NOLINT - haz_cam_intensity_timestamps, sparse_map_timestamps, // NOLINT - sci_cam_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index, sci_cam_to_left_nav_cam_index, // NOLINT - sci_cam_to_right_nav_cam_index, sci_cam_params, // NOLINT - haz_cam_params, depth_to_sci_block_sizes, // NOLINT - depth_to_haz_block_sizes, nav_cam_affines, // NOLINT - depth_clouds, // NOLINT - // Outputs // NOLINT - residual_names, hazcam_depth_to_image_scale, nav_cam_vec, // NOLINT - hazcam_to_navcam_vec, scicam_to_hazcam_vec, // NOLINT - hazcam_depth_to_image_vec, sci_cam_focal_vector, // NOLINT - sci_cam_optical_center, sci_cam_distortion, problem); // NOLINT + // If the nav cam did not get optimized, go back to the solution + // with two focal lengths, rather than the one with one focal length + // solved by this solver (as the average of the two). The two focal + // lengths are very similar, but it is not worth modifying the + // camera model we don't plan to optimize. 
+ if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) + cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; + + // Copy back the optimized extrinsics + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy back the depth to image transforms without scales + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::array_to_affine_transform + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + else + dense_map::array_to_rigid_transform( + normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); } - // Doing sci cam to nav cam matches - image_nav_it = image_to_nav_it.find(index_pair.second); - image_sci_it = image_to_sci_it.find(index_pair.first); - if (image_nav_it != image_to_nav_it.end() && image_sci_it != image_to_sci_it.end()) { - int nav_it = image_nav_it->second; - int sci_it = image_sci_it->second; - - add_nav_sci_cost(// Inputs // NOLINT - nav_it, sci_it, nav_cam_start, // NOLINT - navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, // NOLINT - match_pair, // NOLINT - sparse_map_timestamps, sci_cam_timestamps, // NOLINT - sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index, // NOLINT - hazcam_to_navcam_aff_trans, scicam_to_hazcam_aff_trans, // NOLINT - nav_cam_params, sci_cam_params, // NOLINT - nav_block_sizes, sci_block_sizes, mesh_block_sizes, // NOLINT - nav_cam_affines, depth_clouds, mesh, // NOLINT - bvh_tree, // NOLINT - // Outputs // NOLINT - nav_sci_xyz_count, residual_names, // NOLINT - nav_cam_vec, hazcam_to_navcam_vec, // NOLINT - scicam_to_hazcam_vec, sci_cam_focal_vector, // NOLINT - sci_cam_optical_center, sci_cam_distortion, // NOLINT - initial_nav_sci_xyz, nav_sci_xyz, // NOLINT - 
problem); // NOLINT + // Evaluate the residuals after optimization + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "Final res " << residual_names[it] << " " << residuals[it] << std::endl; } - } // end iterating over matches - if (nav_sci_xyz_count != num_nav_sci_xyz) LOG(FATAL) << "nav sci xyz book-keeping error."; + // Flag outliers + if (!FLAGS_refiner_skip_filtering) { + // Before computing outliers by triangulation angle must recompute all the cameras, + // given the optimized parameters + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); - if (sparse_map->pid_to_cid_fid_.size() != sparse_map->pid_to_xyz_.size()) - LOG(FATAL) << "Book-keeping error 1 in the sparse map."; + // Must deal with outliers by triangulation angle before + // removing outliers by reprojection error, as the latter will + // exclude some rays which form the given triangulated points. + int num_outliers_by_angle = 0, num_total_features = 0; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // Find the largest angle among any two intersecting rays + double max_rays_angle = 0.0; - for (size_t pid = 0; pid < sparse_map->pid_to_cid_fid_.size(); pid++) { - // Note that xyz is an alias. 
This is very important as we optimize these - // xyz in-place. - Eigen::Vector3d& xyz = sparse_map->pid_to_xyz_[pid]; + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; - for (auto cid_fid = sparse_map->pid_to_cid_fid_[pid].begin(); - cid_fid != sparse_map->pid_to_cid_fid_[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; - Eigen::Matrix2Xd& keypoints = sparse_map->cid_to_keypoint_map_[cid]; // alias + num_total_features++; - if (fid > keypoints.cols()) LOG(FATAL) << "Book-keeping error 2 in the sparse map."; + Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; + ray1.normalize(); - Eigen::Vector2d undist_nav_ip = keypoints.col(fid); + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; - ceres::CostFunction* nav_cost_function = - dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); - ceres::LossFunction* nav_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(nav_cost_function, nav_loss_function, - &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid], - &xyz[0]); - residual_names.push_back("nav1"); - residual_names.push_back("nav2"); + // Look at each cid and next cids + if (cid2 <= cid1) + continue; - if (FLAGS_fix_map) - problem.SetParameterBlockConstant(&nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); - } - } + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) continue; - if (FLAGS_opt_map_only) { - problem.SetParameterBlockConstant(&hazcam_depth_to_image_vec[0]); - 
problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); - problem.SetParameterBlockConstant(&hazcam_to_navcam_vec[0]); - problem.SetParameterBlockConstant(&scicam_to_hazcam_vec[0]); - } + Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; + ray2.normalize(); - // Evaluate the residual before optimization - double total_cost = 0.0; - std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "Book-keeping error in the number of residuals."; + double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } + if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; - // Want the RMSE residual with loss, to understand what all residuals contribute, - // but without getting distracted by the outliers - eval_options.apply_loss_function = false; - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - - // Experimentally it was found that it was better to keep the scale - // constant and optimize everything else. Later registration will - // happen to correct any drift in the nav cam poses. - if (!FLAGS_float_scale) problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); - - // See which sci cam intrinsics values to float or keep fixed - if (sci_cam_intrinsics_to_float.find("focal_length") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating focal_length." 
<< std::endl; - problem.SetParameterBlockConstant(&sci_cam_focal_vector[0]); - } else { - // std::cout << "For " << sci_cam << " floating focal_length." << std::endl; - } - if (sci_cam_intrinsics_to_float.find("optical_center") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating optical_center." << std::endl; - problem.SetParameterBlockConstant(&sci_cam_optical_center[0]); - } else { - // std::cout << "For " << sci_cam << " floating optical_center." << std::endl; - } - if (sci_cam_intrinsics_to_float.find("distortion") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&sci_cam_distortion[0]); - } else { - // std::cout << "For " << sci_cam << " floating distortion." << std::endl; - } + max_rays_angle = std::max(max_rays_angle, curr_angle); + } + } - // Solve the problem - ceres::Solver::Options options; - ceres::Solver::Summary summary; - options.linear_solver_type = ceres::ITERATIVE_SCHUR; - options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread - options.max_num_iterations = FLAGS_num_iterations; - options.minimizer_progress_to_stdout = true; - options.gradient_tolerance = 1e-16; - options.function_tolerance = 1e-16; - options.parameter_tolerance = FLAGS_parameter_tolerance; - ceres::Solve(options, &problem, &summary); - - // Copy back the optimized intrinsics - sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], - sci_cam_focal_vector[0])); - sci_cam_params.SetOpticalOffset(sci_cam_optical_center); - sci_cam_params.SetDistortion(sci_cam_distortion); - - // Update with the optimized results - dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; - dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - 
dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - for (int cid = 0; cid < num_cams; cid++) - dense_map::array_to_rigid_transform(nav_cam_affines[cid], &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - // Compute the residuals without loss, want to see the outliers too - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "Book-keeping error in the number of residuals."; + if (max_rays_angle >= FLAGS_refiner_min_angle) + continue; // This is a good triangulated point, with large angle of convergence - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + // Flag as outliers all the features for this cid + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + num_outliers_by_angle++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } + std::cout << std::setprecision(4) << "Removed " << num_outliers_by_angle + << " outlier features with small angle of convergence, out of " + << num_total_features << " (" + << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; + + int num_outliers_reproj = 0; + num_total_features = 0; // reusing this variable + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + num_total_features++; + + // Find the pixel residuals + size_t residual_index = 
dense_map::getMapValue(pid_cid_fid_to_residual, pid, cid, fid); + if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; + + double res_x = residuals[residual_index + 0]; + double res_y = residuals[residual_index + 1]; + // NaN values will never be inliers if the comparison is set as below + bool is_good = (Eigen::Vector2d(res_x, res_y).norm() <= FLAGS_max_reprojection_error); + if (!is_good) { + num_outliers_reproj++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } + } + std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj + << " outlier features using reprojection error, out of " << num_total_features + << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; + } } - // Want the RMSE residual with loss, to understand what all residuals contribute, - // but without getting distracted by the outliers - eval_options.apply_loss_function = false; - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + bool map_changed = (FLAGS_num_iterations > 0 && + (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "")); + if (map_changed) { + std::cout << "Either the sparse map intrinsics or cameras got modified. " + << "The map must be rebuilt." << std::endl; - // Re-register the map, and keep track of what scale was used. - // Note that we do not update the positions of the sci and haz - // images. We don't need those any more. 
- if (!FLAGS_skip_registration) { + dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data + cam_params[ref_cam_type], sparse_map); + } + + if (FLAGS_registration) { + std::cout << "Redoing registration of the obtained map and adjusting all extrinsics.\n"; std::vector data_files; data_files.push_back(FLAGS_hugin_file); data_files.push_back(FLAGS_xyz_file); @@ -2220,34 +2699,45 @@ int main(int argc, char** argv) { double map_scale = sparse_mapping::RegistrationOrVerification(data_files, verification, sparse_map.get()); - // Apply the scale - hazcam_depth_to_image_scale *= map_scale; - // This transform is affine, so both the linear and translation parts need a scale - hazcam_depth_to_image_transform.linear() *= map_scale; - hazcam_depth_to_image_transform.translation() *= map_scale; - // These two are rotation + translation, so only the translation needs the scale - hazcam_to_navcam_aff_trans.translation() *= map_scale; - scicam_to_hazcam_aff_trans.translation() *= map_scale; + std::cout << "Registration resulted in a scale adjustment of: " << map_scale << ".\n"; + // We do not change depth_to_image_scales since multiplying the + // affine component of normalized_depth_to_image is enough. 
+ for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + // This transform is affine, so both the linear and translation parts need a scale + normalized_depth_to_image[cam_type].linear() *= map_scale; + normalized_depth_to_image[cam_type].translation() *= map_scale; + // This is a rotation + translation, so only the translation needs the scale + ref_to_cam_trans[cam_type].translation() *= map_scale; + } } + // Update the optimized depth to image (for haz cam only) + int cam_type = 1; // haz cam + haz_cam_depth_to_image_scale = depth_to_image_scales[cam_type]; + haz_cam_depth_to_image_transform = normalized_depth_to_image[cam_type]; + haz_cam_depth_to_image_transform.linear() *= haz_cam_depth_to_image_scale; + + // Update the config file + dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", + cam_params, ref_to_cam_trans, + ref_to_cam_timestamp_offsets, + haz_cam_depth_to_image_transform); + std::cout << "Writing: " << FLAGS_output_map << std::endl; sparse_map->Save(FLAGS_output_map); - if (!FLAGS_opt_map_only) { - // Save back the results - // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - // nav_to_haz - ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse(); - // nav_to_sci - ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() * - hazcam_to_navcam_aff_trans.inverse(); - ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset; - ref_to_cam_timestamp_offsets[2] = - navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", - cam_params, ref_to_cam_trans, - ref_to_cam_timestamp_offsets, - hazcam_depth_to_image_transform); + if (FLAGS_out_texture_dir != "") { + if (FLAGS_mesh == "") + LOG(FATAL) << "Cannot project camera images onto a mesh if a mesh was not provided.\n"; + + // The transform from the world to every camera + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, 
world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, ref_cam_type, + FLAGS_nav_cam_num_exclude_boundary_pixels, FLAGS_out_texture_dir); } return 0; diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc deleted file mode 100644 index 65e38b8c..00000000 --- a/dense_map/geometry_mapper/tools/camera_refiner2.cc +++ /dev/null @@ -1,2713 +0,0 @@ -/* Copyright (c) 2021, United States Government, as represented by the - * Administrator of the National Aeronautics and Space Administration. - * - * All rights reserved. - * - * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking - * platform" software is licensed under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -// TODO(oalexan1): Move these OpenMVG headers to dense_map_utils.cc, -// and do forward declarations in dense_map_utils.h. 
Get rid of -// warnings beyond our control -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic push -#include -#include -#include -#include -#include -#pragma GCC diagnostic pop - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and " - "full-resolution sci_cam data."); - -DEFINE_string(sparse_map, "", - "A registered SURF sparse map made with some of the ROS bag data, " - "and including nav cam images closely bracketing the sci cam images."); - -DEFINE_string(output_map, "", "Output file containing the updated map."); - -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); - -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", - "The depth point cloud topic in the bag file."); - -DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); - -DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); - -DEFINE_int32(num_overlaps, 10, "How many images (of all camera types) close and forward in " - "time to match to given image."); - -DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, - "Use depth haz cam clouds that are within this distance in " - "time from the nearest haz cam intensity image."); - -DEFINE_double(robust_threshold, 3.0, - "Pixel errors much larger than this will be exponentially attenuated " - "to affect less the cost function."); - 
-DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); - -DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " - "less than this."); - -DEFINE_double(bracket_len, 0.6, - "Lookup sci and haz cam images only between consecutive nav cam images " - "whose distance in time is no more than this (in seconds), after adjusting " - "for the timestamp offset between these cameras. It is assumed the robot " - "moves slowly and uniformly during this time. A large value here will " - "make the refiner compute a poor solution but a small value will prevent " - "enough images being bracketed."); - -DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); - -DEFINE_string(sci_cam_timestamps, "", - "Use only these sci cam timestamps. Must be " - "a file with one timestamp per line."); - -DEFINE_bool(float_sparse_map, false, - "Optimize the sparse map. This should be avoided as it can invalidate the scales " - "of the extrinsics and the registration. It should at least be used with big mesh " - "weights to attenuate those effects."); - -DEFINE_bool(float_scale, false, - "If to optimize the scale of the clouds, part of haz_cam_depth_to_image_transform " - "(use it if the sparse map is kept fixed, or else rescaling and registration " - "of the map and extrinsics is needed). This parameter should not be used with " - "--affine_depth_to_image when the transform is affine, rather than rigid and a scale." - "See also --extrinsics_to_float."); - -DEFINE_bool(float_timestamp_offsets, false, - "If to optimize the timestamp offsets among the cameras."); - -DEFINE_double(timestamp_offsets_max_change, 1.0, - "If floating the timestamp offsets, do not let them change by more than this " - "(measured in seconds). 
Existing image bracketing acts as an additional constraint."); - -DEFINE_string(nav_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for nav_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); - -DEFINE_string(haz_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for haz_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); - -DEFINE_string(sci_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); - -DEFINE_string(extrinsics_to_float, "haz_cam sci_cam depth_to_image", - "Specify the cameras whose extrinsics, relative to nav_cam, to optimize. Also " - "consider if to float haz_cam_depth_to_image_transform."); - -DEFINE_double(nav_cam_to_sci_cam_offset_override_value, - std::numeric_limits::quiet_NaN(), - "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " - "file with this value."); - -DEFINE_double(depth_tri_weight, 1000.0, - "The weight to give to the constraint that depth measurements agree with " - "triangulated points. Use a bigger number as depth errors are usually on the " - "order of 0.01 meters while reprojection errors are on the order of 1 pixel."); - -DEFINE_string(mesh, "", - "Use this geometry mapper mesh from a previous run to help constrain " - "the calibration."); - -DEFINE_double(mesh_tri_weight, 0.0, - "A larger value will give more weight to the constraint that triangulated points. " - "stay close to a preexisting mesh. Not suggested by default."); - -DEFINE_double(depth_mesh_weight, 0.0, - "A larger value will give more weight to the constraint that the depth clouds " - "stay close to the mesh. 
Not suggested by default."); - -DEFINE_bool(affine_depth_to_image, false, "Assume that the depth_to_image_transform " - "for each depth + image camera is an arbitrary affine transform rather than a " - "rotation times a scale."); - -DEFINE_int32(refiner_num_passes, 2, "How many passes of optimization to do. Outliers will be " - "removed after every pass. Each pass will start with the previously optimized " - "solution as an initial guess. Mesh intersections (if applicable) and ray " - "triangulation will be recomputed before each pass."); - -DEFINE_double(initial_max_reprojection_error, 300.0, "If filtering outliers, remove interest " - "points for which the reprojection error, in pixels, is larger than this. This " - "filtering happens when matches are created, before cameras are optimized, and " - "a big value should be used if the initial cameras are not trusted."); - -DEFINE_double(max_reprojection_error, 25.0, "If filtering outliers, remove interest points for " - "which the reprojection error, in pixels, is larger than this. This filtering " - "happens after the optimization pass finishes unless disabled. It is better to not " - "filter too aggressively unless confident of the solution."); - -DEFINE_double(refiner_min_angle, 0.5, "If filtering outliers, remove triangulated points " - "for which all rays converging to it make an angle (in degrees) less than this." 
- "Note that some cameras in the rig may be very close to each other relative to " - "the points the rig images, so care is needed here."); - -DEFINE_bool(refiner_skip_filtering, false, "Do not do any outlier filtering."); - -DEFINE_double(min_ray_dist, 0.0, "The minimum search distance from a starting point along a ray " - "when intersecting the ray with a mesh, in meters (if applicable)."); - -DEFINE_double(max_ray_dist, 100.0, "The maximum search distance from a starting point along a ray " - "when intersecting the ray with a mesh, in meters (if applicable)."); - -DEFINE_string(out_texture_dir, "", "If non-empty and if an input mesh was provided, " - "project the camera images using the optimized poses onto the mesh " - "and write the obtained .obj files in the given directory."); - -DEFINE_bool(verbose, false, - "Print the residuals and save the images and match files." - "Stereo Pipeline's viewer can be used for visualizing these."); - -DEFINE_string(nav_cam_distortion_replacement, "", - "Replace nav_cam's distortion coefficients with this list after the initial " - "determination of triangulated points, and then continue with distortion " - "optimization. A quoted list of four or five values expected, separated by " - "spaces, as the replacement distortion model will have radial and tangential " - "coefficients. Set a positive --nav_cam_num_exclude_boundary_pixels."); - -DEFINE_int32(nav_cam_num_exclude_boundary_pixels, 0, - "Flag as outliers nav cam pixels closer than this to the boundary, and ignore " - "that boundary region when texturing with the --out_texture_dir option. " - "This may improve the calibration accuracy, especially if switching from fisheye " - "to radtan distortion for nav_cam. 
See also the geometry_mapper " - "--undistorted_crop_wins option."); - -namespace dense_map { - -// Calculate interpolated world to camera trans -Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, - const double* end_world_to_ref_t, - const double* ref_to_cam_trans, - double beg_ref_stamp, - double end_ref_stamp, - double ref_to_cam_offset, - double cam_stamp) { - Eigen::Affine3d beg_world_to_ref_aff; - array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); - - Eigen::Affine3d end_world_to_ref_aff; - array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); - - Eigen::Affine3d ref_to_cam_aff; - array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); - - // Covert from cam time to ref time and normalize. It is very - // important that below we subtract the big numbers from each - // other first, which are the timestamps, then subtract whatever - // else is necessary. Otherwise we get problems with numerical - // precision with CERES. - double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) - / (end_ref_stamp - beg_ref_stamp); - - if (beg_ref_stamp == end_ref_stamp) - alpha = 0.0; // handle division by zero - - if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; - - // Interpolate at desired time - Eigen::Affine3d interp_world_to_ref_aff - = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); - - Eigen::Affine3d interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff; - - return interp_world_to_cam_aff; -} - -ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { - // Convert to lower-case - std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower); - - ceres::LossFunction* loss_function = NULL; - if (cost_fun == "l2") - loss_function = NULL; - else if (cost_fun == "huber") - loss_function = new ceres::HuberLoss(th); - else if (cost_fun == "cauchy") - loss_function = new ceres::CauchyLoss(th); - else if 
(cost_fun == "l1") - loss_function = new ceres::SoftLOneLoss(th); - else - LOG(FATAL) << "Unknown cost function: " + cost_fun; - - return loss_function; -} - -// An error function minimizing the error of projecting -// an xyz point into a camera that is bracketed by -// two reference cameras. The precise timestamp offset -// between them is also floated. -struct BracketedCamError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - BracketedCamError(Eigen::Vector2d const& meas_dist_pix, - double left_ref_stamp, double right_ref_stamp, double cam_stamp, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params): - m_meas_dist_pix(meas_dist_pix), - m_left_ref_stamp(left_ref_stamp), - m_right_ref_stamp(right_ref_stamp), - m_cam_stamp(cam_stamp), - m_block_sizes(block_sizes), - m_cam_params(cam_params), - m_num_focal_lengths(1) { - // Sanity check - if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || - m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || - m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || - m_block_sizes[7] != 1 // This will be overwritten shortly - ) { - LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; - } - - // Set the correct distortion size. This cannot be done in the interface for now. - m_block_sizes[7] = m_cam_params.GetDistortion().size(); - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. 
- bool operator()(double const* const* parameters, double* residuals) const { - Eigen::Affine3d world_to_cam_trans = - calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t - parameters[1], // end_world_to_ref_t - parameters[2], // ref_to_cam_trans - m_left_ref_stamp, m_right_ref_stamp, - parameters[4][0], // ref_to_cam_offset - m_cam_stamp); - - // World point - Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - - // Make a deep copy which we will modify - camera::CameraParameters cam_params = m_cam_params; - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); - Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); - Eigen::VectorXd distortion(m_block_sizes[7]); - for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; - cam_params.SetFocalLength(focal_vector); - cam_params.SetOpticalOffset(optical_center); - cam_params.SetDistortion(distortion); - - // Convert world point to given cam coordinates - X = world_to_cam_trans * X; - - // Project into the image - Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - Eigen::Vector2d curr_dist_pix; - cam_params.Convert(undist_pix, &curr_dist_pix); - - // Compute the residuals - residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; - residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. 
- static ceres::CostFunction* - Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, - double cam_stamp, std::vector const& block_sizes, - camera::CameraParameters const& cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, - cam_stamp, block_sizes, cam_params)); - - cost_function->SetNumResiduals(NUM_PIX_PARAMS); - - // The camera wrapper knows all of the block sizes to add, except - // for distortion, which is last - for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 - cost_function->AddParameterBlock(block_sizes[i]); - - // The distortion block size is added separately as it is variable - cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - - return cost_function; - } - - private: - Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel - double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps - double m_cam_stamp; // Current cam timestamp - std::vector m_block_sizes; - camera::CameraParameters m_cam_params; - int m_num_focal_lengths; -}; // End class BracketedCamError - -// An error function minimizing the error of projecting an xyz point -// into a reference camera. Bracketing, timestamps, and transform to -// ref cam are not needed. 
-struct RefCamError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefCamError(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params): - m_meas_dist_pix(meas_dist_pix), - m_block_sizes(block_sizes), - m_cam_params(cam_params), - m_num_focal_lengths(1) { - // Sanity check - if (m_block_sizes.size() != 5 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_XYZ_PARAMS || - m_block_sizes[2] != m_num_focal_lengths || - m_block_sizes[3] != NUM_OPT_CTR_PARAMS || - m_block_sizes[4] != 1 // This will be overwritten shortly - ) { - LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; - } - - // Set the correct distortion size. This cannot be done in the interface for now. - m_block_sizes[4] = m_cam_params.GetDistortion().size(); - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - Eigen::Affine3d world_to_ref_t; - array_to_rigid_transform(world_to_ref_t, parameters[0]); - - // World point - Eigen::Vector3d X; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; - - // Make a deep copy which we will modify - camera::CameraParameters cam_params = m_cam_params; - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); - Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); - Eigen::VectorXd distortion(m_block_sizes[4]); - for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; - cam_params.SetFocalLength(focal_vector); - cam_params.SetOpticalOffset(optical_center); - cam_params.SetDistortion(distortion); - - // Convert world point to given cam coordinates - X = world_to_ref_t * X; - - // Project into the image - Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - Eigen::Vector2d curr_dist_pix; - cam_params.Convert(undist_pix, &curr_dist_pix); - - // Compute the residuals - 
residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; - residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* - Create(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new RefCamError(meas_dist_pix, block_sizes, cam_params)); - - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_PIX_PARAMS); - - // The camera wrapper knows all of the block sizes to add, except - // for distortion, which is last - for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 - cost_function->AddParameterBlock(block_sizes[i]); - - // The distortion block size is added separately as it is variable - cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - - return cost_function; - } - - private: - Eigen::Vector2d m_meas_dist_pix; - std::vector m_block_sizes; - camera::CameraParameters m_cam_params; - int m_num_focal_lengths; -}; // End class RefCamError - -// An error function minimizing the product of a given weight and the -// error between a triangulated point and a measured depth point. The -// depth point needs to be transformed to world coordinates first. For -// that one has to do pose interpolation. 
-struct BracketedDepthError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, - double left_ref_stamp, double right_ref_stamp, double cam_stamp, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_left_ref_stamp(left_ref_stamp), - m_right_ref_stamp(right_ref_stamp), - m_cam_stamp(cam_stamp), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 7 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || - (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || - m_block_sizes[4] != NUM_SCALAR_PARAMS || - m_block_sizes[5] != NUM_XYZ_PARAMS || - m_block_sizes[6] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "BracketedDepthError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam_trans = - calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t - parameters[1], // end_world_to_ref_t - parameters[2], // ref_to_cam_trans - m_left_ref_stamp, m_right_ref_stamp, - parameters[6][0], // ref_to_cam_offset - m_cam_stamp); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - if (m_block_sizes[3] == NUM_AFFINE_PARAMS) - array_to_affine_transform(depth_to_image, parameters[3]); - else - array_to_rigid_transform(depth_to_image, parameters[3]); - - // Apply the scale - double depth_to_image_scale = parameters[4][0]; - depth_to_image.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // Convert to world coordinates - M = world_to_cam_trans.inverse() * M; - - // Triangulated world point 
- Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (X[it] - M[it]); - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, - double left_ref_stamp, double right_ref_stamp, - double cam_stamp, std::vector const& block_sizes) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, - cam_stamp, block_sizes)); - - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - for (size_t i = 0; i < block_sizes.size(); i++) - cost_function->AddParameterBlock(block_sizes[i]); - - return cost_function; - } - - private: - double m_weight; // How much weight to give to this constraint - Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement - double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps - double m_cam_stamp; // Current cam timestamp - std::vector m_block_sizes; -}; // End class BracketedDepthError - -// An error function minimizing the product of a given weight and the -// error between a mesh point and a transformed measured depth point. The -// depth point needs to be transformed to world coordinates first. For -// that one has to do pose interpolation. 
-struct BracketedDepthMeshError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - BracketedDepthMeshError(double weight, - Eigen::Vector3d const& meas_depth_xyz, - Eigen::Vector3d const& mesh_xyz, - double left_ref_stamp, double right_ref_stamp, double cam_stamp, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_mesh_xyz(mesh_xyz), - m_left_ref_stamp(left_ref_stamp), - m_right_ref_stamp(right_ref_stamp), - m_cam_stamp(cam_stamp), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 6 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || - (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || - m_block_sizes[4] != NUM_SCALAR_PARAMS || - m_block_sizes[5] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "BracketedDepthMeshError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. 
- bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam_trans = - calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t - parameters[1], // end_world_to_ref_t - parameters[2], // ref_to_cam_trans - m_left_ref_stamp, m_right_ref_stamp, - parameters[5][0], // ref_to_cam_offset - m_cam_stamp); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - if (m_block_sizes[3] == NUM_AFFINE_PARAMS) - array_to_affine_transform(depth_to_image, parameters[3]); - else - array_to_rigid_transform(depth_to_image, parameters[3]); - - // Apply the scale - double depth_to_image_scale = parameters[4][0]; - depth_to_image.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // Convert to world coordinates - M = world_to_cam_trans.inverse() * M; - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (m_mesh_xyz[it] - M[it]); - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(double weight, - Eigen::Vector3d const& meas_depth_xyz, - Eigen::Vector3d const& mesh_xyz, - double left_ref_stamp, double right_ref_stamp, - double cam_stamp, std::vector const& block_sizes) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new BracketedDepthMeshError(weight, meas_depth_xyz, mesh_xyz, - left_ref_stamp, right_ref_stamp, - cam_stamp, block_sizes)); - - // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - for (size_t i = 0; i < block_sizes.size(); i++) - cost_function->AddParameterBlock(block_sizes[i]); - - return cost_function; - } - - private: - double m_weight; // How much weight to give to this constraint - Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement - Eigen::Vector3d m_mesh_xyz; // Point on preexisting mesh - double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps - double m_cam_stamp; // Current cam timestamp - std::vector m_block_sizes; -}; // End class BracketedDepthMeshError - -// An error function minimizing the product of a given weight and the -// error between a triangulated point and a measured depth point for -// the ref camera. The depth point needs to be transformed to world -// coordinates first. -struct RefDepthError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 4 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - (m_block_sizes[1] != NUM_RIGID_PARAMS && m_block_sizes[1] != NUM_AFFINE_PARAMS) || - m_block_sizes[2] != NUM_SCALAR_PARAMS || - m_block_sizes[3] != NUM_XYZ_PARAMS) { - LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. 
- bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam; - array_to_rigid_transform(world_to_cam, parameters[0]); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - if (m_block_sizes[1] == NUM_AFFINE_PARAMS) - array_to_affine_transform(depth_to_image, parameters[1]); - else - array_to_rigid_transform(depth_to_image, parameters[1]); - - // Apply the scale - double depth_to_image_scale = parameters[2][0]; - depth_to_image.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // Convert to world coordinates - M = world_to_cam.inverse() * M; - - // Triangulated world point - Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (X[it] - M[it]); - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, - std::vector const& block_sizes) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new RefDepthError(weight, meas_depth_xyz, block_sizes)); - - cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - for (size_t i = 0; i < block_sizes.size(); i++) - cost_function->AddParameterBlock(block_sizes[i]); - - return cost_function; - } - - private: - double m_weight; // How much weight to give to this constraint - Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement - std::vector m_block_sizes; -}; // End class RefDepthError - -// An error function minimizing a weight times the distance from a -// variable xyz point to a fixed reference xyz point. 
-struct XYZError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - XYZError(Eigen::Vector3d const& ref_xyz, std::vector const& block_sizes, double weight) - : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) { - // Sanity check - if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS) - LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n"; - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - // Takes array of arrays as parameters. - // TODO(oalexan1): May want to use the analytical Ceres cost function - bool operator()(double const* const* parameters, double* residuals) const { - // Compute the residuals - for (int it = 0; it < NUM_XYZ_PARAMS; it++) - residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz, - std::vector const& block_sizes, - double weight) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new XYZError(ref_xyz, block_sizes, weight)); - - // The residual size is always the same - cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { - cost_function->AddParameterBlock(block_sizes[i]); - } - return cost_function; - } - - private: - Eigen::Vector3d m_ref_xyz; // reference xyz - std::vector m_block_sizes; - double m_weight; -}; // End class XYZError - -// Calculate the rmse residual for each residual type. 
-void calc_median_residuals(std::vector const& residuals, - std::vector const& residual_names, - std::string const& tag) { - size_t num = residuals.size(); - - if (num != residual_names.size()) - LOG(FATAL) << "There must be as many residuals as residual names."; - - std::map > stats; - for (size_t it = 0; it < residuals.size(); it++) - stats[residual_names[it]] = std::vector(); // initialize - - for (size_t it = 0; it < residuals.size(); it++) - stats[residual_names[it]].push_back(std::abs(residuals[it])); - - std::cout << "The 25, 50, 75, and 100th percentile residual stats " << tag << std::endl; - for (auto it = stats.begin(); it != stats.end(); it++) { - std::string const& name = it->first; - std::vector vals = stats[name]; // make a copy - std::sort(vals.begin(), vals.end()); - - int len = vals.size(); - - int it1 = static_cast(0.25 * len); - int it2 = static_cast(0.50 * len); - int it3 = static_cast(0.75 * len); - int it4 = static_cast(len - 1); - - if (len == 0) - std::cout << name << ": " << "none"; - else - std::cout << std::setprecision(8) - << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' - << vals[it3] << ' ' << vals[it4]; - std::cout << " (" << len << " residuals)" << std::endl; - } -} - - typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; - - // A class to encompass all known information about a camera - // This is work in progress and will replace some of the logic further down. - struct cameraImage { - // An index to look up the type of camera. This will equal the - // value ref_camera_type if and only if this is a reference - // camera. - int camera_type; - - // The timestamp for this camera (in floating point seconds since epoch) - // measured by the clock/cpu which is particular to this camera. 
- double timestamp; - - // The timestamp with an adjustment added to it to be in - // reference camera time - double ref_timestamp; - - // The timestamp of the closest cloud for this image, measured - // with the same clock as the 'timestamp' value. - double cloud_timestamp; - - // Indices to look up the reference cameras bracketing this camera - // in time. The two indices will have same value if and only if - // this is a reference camera. - int beg_ref_index; - int end_ref_index; - - // The image for this camera, in grayscale - cv::Mat image; - - // The corresponding depth cloud, for an image + depth camera. - // Will be empty and uninitialized for a camera lacking depth. - cv::Mat depth_cloud; - }; - - // Sort by timestamps adjusted to be relative to the ref camera clock - bool timestampLess(cameraImage i, cameraImage j) { - return (i.ref_timestamp < j.ref_timestamp); - } - - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. - bool depthValue( // Inputs - cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, - // Output - Eigen::Vector3d& depth_xyz) { - depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize - - if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud - - int col = round(dist_ip[0]); - int row = round(dist_ip[1]); - - if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) - LOG(FATAL) << "Out of range in depth cloud."; - - // After rounding one may hit the bound - if (col == depth_cloud.cols || row == depth_cloud.rows) - return false; - - cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); - - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) - return false; - - depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); - - return true; - } - - // Project given images with optimized cameras onto mesh. 
It is - // assumed that the most up-to-date cameras were copied/interpolated - // form the optimizer structures into the world_to_cam vector. - void meshProjectCameras(std::vector const& cam_names, - std::vector const& cam_params, - std::vector const& cam_images, - std::vector const& world_to_cam, - mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - int ref_camera_type, int nav_cam_num_exclude_boundary_pixels, - std::string const& out_dir) { - if (cam_names.size() != cam_params.size()) - LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n"; - if (cam_images.size() != world_to_cam.size()) - LOG(FATAL) << "There must be as many camera images as camera poses.\n"; - if (out_dir.empty()) - LOG(FATAL) << "The output directory is empty.\n"; - - char filename_buffer[1000]; - - for (size_t cid = 0; cid < cam_images.size(); cid++) { - double timestamp = cam_images[cid].timestamp; - int cam_type = cam_images[cid].camera_type; - - int num_exclude_boundary_pixels = 0; - if (cam_type == ref_camera_type) - num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels; - - // Must use the 10.7f format for the timestamp as everywhere else in the code - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s", - out_dir.c_str(), timestamp, cam_names[cam_type].c_str()); - std::string out_prefix = filename_buffer; // convert to string - - std::cout << "Creating texture for: " << out_prefix << std::endl; - meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type], - num_exclude_boundary_pixels, out_prefix); - } - } - - // Rebuild the map. - // TODO(oalexan1): This must be integrated in astrobee. 
- void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data - camera::CameraParameters const& cam_params, - boost::shared_ptr sparse_map) { - std::string rebuild_detector = "SURF"; - std::cout << "Rebuilding map with " << rebuild_detector << " detector."; - - // Set programatically the command line option for astrobee's map - // building min angle based on the corresponding refiner flag. - std::ostringstream oss; - oss << FLAGS_refiner_min_angle; - std::string min_valid_angle = oss.str(); - google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); - if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) - LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " - << "map-building software.\n"; - // The newline below is due to the sparse map software not putting a newline - std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; - - // Copy some data to make sure it does not get lost on resetting things below - std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; - std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; - - // Ensure the new camera parameters are set - sparse_map->SetCameraParameters(cam_params); - - std::cout << "Detecting features."; - sparse_map->DetectFeatures(); - - std::cout << "Matching features."; - // Borrow from the original map which images should be matched with which. 
- sparse_map->cid_to_cid_.clear(); - for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { - std::map const& track = pid_to_cid_fid[p]; // alias - for (std::map::const_iterator it1 = track.begin(); - it1 != track.end() ; it1++) { - for (std::map::const_iterator it2 = it1; - it2 != track.end() ; it2++) { - if (it1->first != it2->first) { - // Never match an image with itself - sparse_map->cid_to_cid_[it1->first].insert(it2->first); - } - } - } - } - - sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), - sparse_mapping::MatchesFile(map_file), sparse_map.get()); - for (size_t i = 0; i < world_to_ref_t.size(); i++) - sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); - - // Wipe file that is no longer needed - try { - std::remove(sparse_mapping::EssentialFile(map_file).c_str()); - }catch(...) {} - - std::cout << "Building tracks."; - bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff - sparse_mapping::BuildTracks(rm_invalid_xyz, - sparse_mapping::MatchesFile(map_file), - sparse_map.get()); - - // It is essential that during re-building we do not vary the - // cameras. Those were usually computed with a lot of SURF features, - // while rebuilding is usually done with many fewer ORGBRISK - // features. - bool fix_cameras = true; - if (fix_cameras) - std::cout << "Performing bundle adjustment with fixed cameras."; - else - std::cout << "Performing bundle adjustment while floating cameras."; - - sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); - } - - // TODO(oalexan1): Move this to utils. - // Intersect ray with mesh. Return true on success. 
- bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, - camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, - mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - double min_ray_dist, double max_ray_dist, - // Output - Eigen::Vector3d& intersection) { - // Initialize the output - intersection = Eigen::Vector3d(0.0, 0.0, 0.0); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], - undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); - cam_ray.normalize(); - - Eigen::Affine3d cam_to_world = world_to_cam.inverse(); - Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; - Eigen::Vector3d cam_ctr = cam_to_world.translation(); - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - - bvh_ray.tmin = min_ray_dist; - bvh_ray.tmax = max_ray_dist; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (bvh_tree->intersect(bvh_ray, &hit)) { - double cam_to_mesh_dist = hit.t; - intersection = cam_ctr + cam_to_mesh_dist * world_ray; - return true; - } - - return false; - } - - // Compute the transforms from the world to every camera, using pose interpolation - // if necessary. 
- void calc_world_to_cam_transforms( // Inputs - std::vector const& cams, std::vector const& world_to_ref_vec, - std::vector const& ref_timestamps, std::vector const& ref_to_cam_vec, - std::vector const& ref_to_cam_timestamp_offsets, - // Output - std::vector& world_to_cam) { - if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) - LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; - if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) - LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; - - world_to_cam.resize(cams.size()); - - for (size_t it = 0; it < cams.size(); it++) { - int beg_index = cams[it].beg_ref_index; - int end_index = cams[it].end_ref_index; - int cam_type = cams[it].camera_type; - world_to_cam[it] = dense_map::calc_world_to_cam_trans - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - ref_timestamps[beg_index], ref_timestamps[end_index], - ref_to_cam_timestamp_offsets[cam_type], - cams[it].timestamp); - } - return; - } - - // Match features while assuming that the input cameras can be used to filter out - // outliers by reprojection error. 
- void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, - camera::CameraParameters const& left_params, - camera::CameraParameters const& right_params, - Eigen::Affine3d const& left_world_to_cam, - Eigen::Affine3d const& right_world_to_cam, - double reprojection_error, - cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, - Eigen::Matrix2Xd const& left_keypoints, - Eigen::Matrix2Xd const& right_keypoints, - bool verbose, - // output - MATCH_PAIR* matches) { - // Match by using descriptors first - std::vector cv_matches; - interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); - - // Do filtering - std::vector left_vec; - std::vector right_vec; - std::vector filtered_cv_matches; - for (size_t j = 0; j < cv_matches.size(); j++) { - int left_ip_index = cv_matches.at(j).queryIdx; - int right_ip_index = cv_matches.at(j).trainIdx; - - Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], - left_keypoints.col(left_ip_index)[1]); - - Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], - right_keypoints.col(right_ip_index)[1]); - - Eigen::Vector2d undist_left_ip; - Eigen::Vector2d undist_right_ip; - left_params.Convert - (dist_left_ip, &undist_left_ip); - right_params.Convert - (dist_right_ip, &undist_right_ip); - - Eigen::Vector3d X = - dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), - left_world_to_cam, right_world_to_cam, - undist_left_ip, undist_right_ip); - - // Project back into the cameras - Eigen::Vector3d left_cam_X = left_world_to_cam * X; - Eigen::Vector2d undist_left_pix - = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); - Eigen::Vector2d dist_left_pix; - left_params.Convert(undist_left_pix, - &dist_left_pix); - - Eigen::Vector3d right_cam_X = right_world_to_cam * X; - Eigen::Vector2d undist_right_pix - = right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); - 
Eigen::Vector2d dist_right_pix; - right_params.Convert(undist_right_pix, - &dist_right_pix); - - // Filter out points whose reprojection error is too big - bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && - (dist_right_ip - dist_right_pix).norm() <= reprojection_error); - - // If any values above are Inf or NaN, is_good will be false as well - if (!is_good) continue; - - // Get the keypoints from the good matches - left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], - left_keypoints.col(left_ip_index)[1])); - right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], - right_keypoints.col(right_ip_index)[1])); - - filtered_cv_matches.push_back(cv_matches[j]); - } - - if (left_vec.empty()) return; - - // Filter using geometry constraints - // These may need some tweaking but works reasonably well. - double ransacReprojThreshold = 20.0; - cv::Mat inlier_mask; - int maxIters = 10000; - double confidence = 0.8; - - // affine2D works better than homography - // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, - // ransacReprojThreshold, inlier_mask, maxIters, confidence); - cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, - ransacReprojThreshold, maxIters, confidence); - - std::vector left_ip, right_ip; - for (size_t j = 0; j < filtered_cv_matches.size(); j++) { - int left_ip_index = filtered_cv_matches.at(j).queryIdx; - int right_ip_index = filtered_cv_matches.at(j).trainIdx; - - if (inlier_mask.at(j, 0) == 0) continue; - - cv::Mat left_desc = left_descriptors.row(left_ip_index); - cv::Mat right_desc = right_descriptors.row(right_ip_index); - - InterestPoint left; - left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); - - InterestPoint right; - right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); - - left_ip.push_back(left); - right_ip.push_back(right); - } - - // Update the shared variable using a lock - match_mutex->lock(); - 
- // Print the verbose message inside the lock, otherwise the text - // may get messed up. - if (verbose) - std::cout << "Number of matches for pair " - << left_image_index << ' ' << right_image_index << ": " - << left_ip.size() << std::endl; - - *matches = std::make_pair(left_ip, right_ip); - match_mutex->unlock(); - } - - // Find if a given feature is an inlier, and take care to check that - // the bookkeeping is correct. - int getMapValue(std::vector>> const& pid_cid_fid, - size_t pid, int cid, int fid) { - if (pid_cid_fid.size() <= pid) - LOG(FATAL) << "Current pid is out of range.\n"; - - auto& cid_fid_map = pid_cid_fid[pid]; // alias - auto cid_it = cid_fid_map.find(cid); - if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; - - auto& fid_map = cid_it->second; // alias - auto fid_it = fid_map.find(fid); - if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - - return fid_it->second; - } - - // Set a feature to be an outlier, and take care to check that - // the bookkeeping is correct. 
- void setMapValue(std::vector>> & pid_cid_fid, - size_t pid, int cid, int fid, int val) { - if (pid_cid_fid.size() <= pid) - LOG(FATAL) << "Current pid is out of range.\n"; - - auto& cid_fid_map = pid_cid_fid[pid]; // alias - auto cid_it = cid_fid_map.find(cid); - if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; - - auto& fid_map = cid_it->second; // alias - auto fid_it = fid_map.find(fid); - if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - - fid_it->second = val; - } - -} // namespace dense_map - -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; - - std::cout.precision(17); // to be able to print timestamps - - if (FLAGS_ros_bag.empty()) - LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_sparse_map.empty()) - LOG(FATAL) << "The input sparse map was not specified."; - - if (FLAGS_output_map.empty()) - LOG(FATAL) << "The output sparse map was not specified."; - - if (FLAGS_robust_threshold <= 0.0) - LOG(FATAL) << "The robust threshold must be positive.\n"; - - if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; - - if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; - - if (FLAGS_timestamp_offsets_max_change < 0) - LOG(FATAL) << "The timestamp offsets must be non-negative."; - - if (FLAGS_refiner_min_angle <= 0.0) - LOG(FATAL) << "The min triangulation angle must be positive.\n"; - - if (FLAGS_depth_tri_weight < 0.0) - LOG(FATAL) << "The depth weight must non-negative.\n"; - - if (FLAGS_mesh_tri_weight < 0.0) - LOG(FATAL) << "The mesh weight must non-negative.\n"; - - if (FLAGS_depth_mesh_weight < 0.0) - LOG(FATAL) << "The depth mesh weight must non-negative.\n"; - - if (FLAGS_nav_cam_num_exclude_boundary_pixels < 0) - LOG(FATAL) << "Must have a non-negative value for --nav_cam_num_exclude_boundary_pixels.\n"; - - if (FLAGS_nav_cam_distortion_replacement != "") { - 
if (FLAGS_haz_cam_intrinsics_to_float != "" || - FLAGS_sci_cam_intrinsics_to_float != "" || - FLAGS_extrinsics_to_float != "" || - FLAGS_float_sparse_map || - FLAGS_float_scale || - FLAGS_float_timestamp_offsets) - LOG(FATAL) << "If replacing and optimizing the nav_cam model distortion, the rest " - "of the variables must be kept fixed. Once this model is found and saved, " - "a subsequent call to this tool may do various co-optimizations."; - } - - // We assume our camera rig has n camera types. Each can be image or - // depth + image. Just one camera must be the reference camera. In - // this code it will be nav_cam. Every camera object (class - // cameraImage) knows its type (an index), which can be used to look - // up its intrinsics, image topic, depth topic (if present), - // ref_to_cam_timestamp_offset, and ref_to_cam_transform. A camera - // object also stores its image, depth cloud (if present), its - // timestamp, and indices pointing to its left and right ref - // bracketing cameras. - - // For every instance of a reference camera its - // ref_to_cam_timestamp_offset is 0 and kept fixed, - // ref_to_cam_transform is the identity and kept fixed, and the - // indices pointing to the left and right ref bracketing cameras are - // identical. 
- - // The info below will eventually come from a file - int num_cam_types = 3; - int ref_cam_type = 0; // Below we assume the starting cam is the ref cam - - // Image and depth topics - std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", - "/hw/depth_haz/extended/amplitude_int", - "/hw/cam_sci/compressed"}; - std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; - - std::vector cam_params; - std::vector ref_to_cam_trans; - std::vector ref_to_cam_timestamp_offsets; - Eigen::Affine3d nav_cam_to_body_trans; - Eigen::Affine3d haz_cam_depth_to_image_transform; - dense_map::readConfigFile( // Inputs - cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", - // Outputs - cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, - haz_cam_depth_to_image_transform); - std::vector orig_cam_params = cam_params; - - if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { - for (size_t it = 0; it < cam_names.size(); it++) { - if (cam_names[it] == "sci_cam") { - double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; - std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] - << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; - ref_to_cam_timestamp_offsets[it] = new_val; - } - } - } - - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; - std::shared_ptr bvh_tree; - if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - - // If desired to process only specific timestamps - std::set sci_cam_timestamps_to_use; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - double val; - while (ifs >> val) sci_cam_timestamps_to_use.insert(val); - } - - double haz_cam_depth_to_image_scale - = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); - - // Since we will keep 
the scale fixed, vary the part of the transform without - // the scale, while adding the scale each time before the transform is applied - Eigen::Affine3d haz_cam_depth_to_image_noscale = haz_cam_depth_to_image_transform; - haz_cam_depth_to_image_noscale.linear() /= haz_cam_depth_to_image_scale; - - // Read the sparse map - boost::shared_ptr sparse_map = - boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - - // Deal with using a non-FOV model for nav_cam, if desired - Eigen::VectorXd nav_cam_distortion_replacement; - if (FLAGS_nav_cam_distortion_replacement != "") { - std::vector vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); - if (vec.size() != 4 && vec.size() != 5) - LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding" - << "to radial and tangential distortion coefficients.\n"; - nav_cam_distortion_replacement - = Eigen::Map(vec.data(), vec.size()); - } - - if (FLAGS_refiner_skip_filtering && - (cam_params[ref_cam_type].GetDistortion().size() > 1 || - nav_cam_distortion_replacement.size() != 0)) - LOG(FATAL) << "Must use outlier filtering if a non-fisheye lens distortion is used" - << "with nav_cam, as it is hard to to fit such a model at the image periphery.\n"; - - // TODO(oalexan1): All this timestamp reading logic below should be in a function - - // Parse the ref timestamps from the sparse map - // We assume the sparse map image names are the timestamps. 
- std::vector ref_timestamps; - const std::vector& sparse_map_images = sparse_map->cid_to_filename_; - ref_timestamps.resize(sparse_map_images.size()); - for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); - ref_timestamps[cid] = timestamp; - } - if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - - // Will optimize the nav cam poses as part of the process - std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias - - // Put transforms of the reference cameras in a vector. We will optimize them. - int num_ref_cams = world_to_ref_t.size(); - if (world_to_ref_t.size() != ref_timestamps.size()) - LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; - std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::rigid_transform_to_array(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - - std::vector> cam_timestamps_to_use = {std::set(), - std::set(), - sci_cam_timestamps_to_use}; - - // Which intrinsics from which cameras to float - std::vector> intrinsics_to_float(num_cam_types); - dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); - dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); - dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); - - std::string depth_to_image_name = "depth_to_image"; - std::set extrinsics_to_float; - dense_map::parse_extrinsics_to_float(cam_names, depth_to_image_name, - FLAGS_extrinsics_to_float, extrinsics_to_float); - - if (FLAGS_float_scale && FLAGS_affine_depth_to_image) - LOG(FATAL) << "The options --float_scale and --affine_depth_to_image should not be used " - << "together. 
If the latter is used, the scale is always floated.\n"; - - if (!FLAGS_affine_depth_to_image && FLAGS_float_scale && - extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " - << "this is allowed as part of --extrinsics_to_float.\n"; - - if (FLAGS_nav_cam_distortion_replacement != "") { - if (intrinsics_to_float[ref_cam_type].find("distortion") - == intrinsics_to_float[ref_cam_type].end() || - intrinsics_to_float[ref_cam_type].size() != 1) { - LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " - << "distortion and no other nav cam intrinsics.\n"; - } - } - - // Put in arrays, so we can optimize them - std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::rigid_transform_to_array - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Set up the variable blocks to optimize for BracketedDepthError - int num_depth_params = dense_map::NUM_RIGID_PARAMS; - if (FLAGS_affine_depth_to_image) num_depth_params = dense_map::NUM_AFFINE_PARAMS; - - // Depth to image transforms and scales - std::vector depth_to_image_noscale; - std::vector depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; - depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); - depth_to_image_noscale.push_back(haz_cam_depth_to_image_noscale); - depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); - - // Put in arrays, so we can optimize them - std::vector depth_to_image_noscale_vec(num_cam_types * num_depth_params); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (FLAGS_affine_depth_to_image) - dense_map::affine_transform_to_array - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type]); - else - dense_map::rigid_transform_to_array - 
(depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type]); - } - - // Put the intrinsics in arrays - std::vector focal_lengths(num_cam_types); - std::vector optical_centers(num_cam_types); - std::vector distortions(num_cam_types); - for (int it = 0; it < num_cam_types; it++) { - focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths - optical_centers[it] = cam_params[it].GetOpticalOffset(); - - if (cam_params[it].GetDistortion().size() == 0) - LOG(FATAL) << "The cameras are expected to have distortion."; - distortions[it] = cam_params[it].GetDistortion(); - } - - // Build a map for quick access for all the messages we may need - // TODO(oalexan1): Must the view be kept open for this to work? - std::vector topics; - for (auto it = 0; it < image_topics.size(); it++) - if (image_topics[it] != "") topics.push_back(image_topics[it]); - for (auto it = 0; it < depth_topics.size(); it++) - if (depth_topics[it] != "") topics.push_back(depth_topics[it]); - rosbag::Bag bag; - bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); - rosbag::View view(bag, rosbag::TopicQuery(topics)); - std::map> bag_map; - dense_map::indexMessages(view, bag_map); - - // A lot of care is needed here. This remembers how we travel in time - // for each camera type so we have fewer messages to search. - // But if a mistake is done below it will mess up this bookkeeping. - std::vector image_start_positions(num_cam_types, 0); - std::vector cloud_start_positions(num_cam_types, 0); - - // Cannot add a (positive) value more this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. - std::vector upper_bound(num_cam_types, 1.0e+100); - - // Cannot add a (negative) value less than this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. - std::vector lower_bound(num_cam_types, -1.0e+100); - - std::cout << "Bracketing the images in time." 
<< std::endl; - - // Populate the data for each camera image - std::vector cams; - for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { - if (ref_cam_type != 0) - LOG(FATAL) << "It is assumed that the ref cam type is 0."; - - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - dense_map::cameraImage cam; - bool success = false; - if (cam_type == ref_cam_type) { - cam.camera_type = cam_type; - cam.timestamp = ref_timestamps[ref_it]; - cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself - cam.beg_ref_index = ref_it; - cam.end_ref_index = ref_it; - - // Search the whole set of timestamps, so set start_pos = - // 0. This is slower but more robust than keeping track of how - // we move in the increasing order of time. - int start_pos = 0; - bool save_grayscale = true; // for matching we will need grayscale - double found_time = -1.0; - // this has to succeed since we picked the ref images in the map - if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, - // outputs - cam.image, image_start_positions[cam_type], // care here - found_time)) - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up camera at time " << cam.timestamp << ".\n"; - - // The exact time is expected - if (found_time != cam.timestamp) - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up camera at time " << cam.timestamp << ".\n"; - - // See if to skip this timestamp - if (!cam_timestamps_to_use[cam_type].empty() && - cam_timestamps_to_use[cam_type].find(cam.timestamp) == - cam_timestamps_to_use[cam_type].end()) - continue; - - success = true; - - } else { - if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket - - // Convert the bracketing timestamps to current cam's time - double ref_to_cam_offset = ref_to_cam_timestamp_offsets[cam_type]; - double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_offset; - double right_timestamp = 
ref_timestamps[ref_it + 1] + ref_to_cam_offset; - - if (right_timestamp <= left_timestamp) - LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; - - if (right_timestamp - left_timestamp > FLAGS_bracket_len) - continue; // Must respect the bracket length - - // Find the image timestamp closest to the midpoint of the brackets. This will give - // more room to vary the timestamp later. - double mid_timestamp = (left_timestamp + right_timestamp)/2.0; - - // Search forward in time from image_start_positions[cam_type]. - // We will update that too later. One has to be very careful - // with it so it does not go too far forward in time - // so that at the next iteration we are passed what we - // search for. - int start_pos = image_start_positions[cam_type]; // care here - bool save_grayscale = true; // for matching we will need grayscale - double curr_timestamp = left_timestamp; // start here - cv::Mat best_image; - double best_dist = 1.0e+100; - double best_time = -1.0, found_time = -1.0; - while (1) { - if (found_time >= right_timestamp) break; // out of range - - cv::Mat image; - if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], - save_grayscale, - // outputs - image, - start_pos, // care here - found_time)) - break; // Need not succeed, but then there's no need to go on are we are at the end - - double curr_dist = std::abs(found_time - mid_timestamp); - if (curr_dist < best_dist) { - best_dist = curr_dist; - best_time = found_time; - // Update the start position for the future only if this is a good - // solution. Otherwise we may have moved too far. - image_start_positions[cam_type] = start_pos; - image.copyTo(best_image); - } - - // Go forward in time. We count on the fact that - // lookupImage() looks forward from given guess. 
- curr_timestamp = std::nextafter(found_time, 1.01 * found_time); - } - - if (best_time < 0.0) continue; // bracketing failed - - // Note how we allow best_time == left_timestamp if there's no other choice - if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck - - cam.camera_type = cam_type; - cam.timestamp = best_time; - cam.ref_timestamp = best_time - ref_to_cam_offset; - cam.beg_ref_index = ref_it; - cam.end_ref_index = ref_it + 1; - cam.image = best_image; - - upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); - lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); - - // See if to skip this timestamp - if (!cam_timestamps_to_use[cam_type].empty() && - cam_timestamps_to_use[cam_type].find(cam.timestamp) == - cam_timestamps_to_use[cam_type].end()) - continue; - - success = true; - } - - if (!success) continue; - - // This can be useful in checking if all the sci cams were bracketed successfully. - // std::cout << std::setprecision(17) << std::fixed << "For camera " - // << cam_names[cam_type] << " pick timestamp " - // << cam.timestamp << ".\n"; - - if (depth_topics[cam_type] != "") { - cam.cloud_timestamp = -1.0; // will change - cv::Mat cloud; - // Look up the closest cloud in time (either before or after cam.timestamp) - // This need not succeed. - dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], - FLAGS_max_haz_cam_image_to_depth_timestamp_diff, - // Outputs - cam.depth_cloud, - cloud_start_positions[cam_type], // care here - cam.cloud_timestamp); - } - - cams.push_back(cam); - } // end loop over camera types - } // end loop over ref images - - std::cout << "Allowed timestamp offset range while respecting the brackets for given cameras:\n"; - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - if (cam_type == ref_cam_type) continue; // bounds don't make sense here - - // So far we had the relative change. 
Now add the actual offset to get the max allowed offset. - lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - - std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] - << ", " << upper_bound[cam_type] << "]\n"; - } - - if (FLAGS_float_timestamp_offsets) { - std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - if (cam_type == ref_cam_type) continue; // bounds don't make sense here - - lower_bound[cam_type] = std::max(lower_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - - FLAGS_timestamp_offsets_max_change); - - upper_bound[cam_type] = std::min(upper_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - + FLAGS_timestamp_offsets_max_change); - - // Tighten a bit to ensure we don't exceed things when we add - // and subtract timestamps later. Note that timestamps are - // measured in seconds and fractions of a second since epoch and - // can be quite large so precision loss can easily happen. - lower_bound[cam_type] += 1.0e-5; - upper_bound[cam_type] -= 1.0e-5; - - std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] - << ", " << upper_bound[cam_type] << "]\n"; - } - } - - // The images from the bag may need to be resized to be the same - // size as in the calibration file. Sometimes the full-res images - // can be so blurry that interest point matching fails, hence the - // resizing. - for (size_t it = 0; it < cam_params.size(); it++) - dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); - - // Sort by the timestamp in reference camera time. This is essential - // for matching each image to other images close in time. 
- std::sort(cams.begin(), cams.end(), dense_map::timestampLess); - - if (FLAGS_verbose) { - int count = 10000; - std::vector image_files; - for (size_t it = 0; it < cams.size(); it++) { - std::ostringstream oss; - oss << count << "_" << cams[it].camera_type << ".jpg"; - std::string name = oss.str(); - std::cout << "Writing: " << name << std::endl; - cv::imwrite(name, cams[it].image); - count++; - image_files.push_back(name); - } - } - - // Detect features using multiple threads. Too many threads may result - // in high memory usage. - int num_threads_val = std::min(8u, std::thread::hardware_concurrency()); - std::ostringstream oss; - oss << num_threads_val; - std::string num_threads = oss.str(); - google::SetCommandLineOption("num_threads", num_threads.c_str()); - if (!gflags::GetCommandLineOption("num_threads", &num_threads)) - LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; - std::cout << "Using " << num_threads << " threads for feature detection/matching." << std::endl; - std::cout << "Note that this step uses a huge amount of memory." << std::endl; - - std::cout << "Detecting features." 
<< std::endl; - - std::vector cid_to_descriptor_map; - std::vector cid_to_keypoint_map; - cid_to_descriptor_map.resize(cams.size()); - cid_to_keypoint_map.resize(cams.size()); - { - // Make the threadpool go out of scope when not needed to not use up memory - ff_common::ThreadPool thread_pool; - for (size_t it = 0; it < cams.size(); it++) { - thread_pool.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, - &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); - } - thread_pool.Join(); - } - - dense_map::MATCH_MAP matches; - - std::vector > image_pairs; - for (size_t it1 = 0; it1 < cams.size(); it1++) { - for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) { - image_pairs.push_back(std::make_pair(it1, it2)); - } - } - - { - // The transform from the world to every camera - std::vector world_to_cam; - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); - - std::cout << "Matching features." 
<< std::endl; - ff_common::ThreadPool thread_pool; - std::mutex match_mutex; - for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { - auto pair = image_pairs[pair_it]; - int left_image_it = pair.first, right_image_it = pair.second; - if (FLAGS_refiner_skip_filtering) { - thread_pool.AddTask(&dense_map::matchFeatures, - &match_mutex, - left_image_it, right_image_it, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], - cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); - } else { - thread_pool.AddTask(&dense_map::matchFeaturesWithCams, - &match_mutex, - left_image_it, right_image_it, - cam_params[cams[left_image_it].camera_type], - cam_params[cams[right_image_it].camera_type], - world_to_cam[left_image_it], - world_to_cam[right_image_it], - FLAGS_initial_max_reprojection_error, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], - cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); - } - } - thread_pool.Join(); - } - cid_to_descriptor_map = std::vector(); // Wipe, takes memory - - // If feature A in image I matches feather B in image J, which matches feature C in image K, - // then (A, B, C) belong together into a track. Build such a track. 
- - std::vector, int>> keypoint_map(cams.size()); - - // Give all interest points in a given image a unique id - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias - - int left_index = index_pair.first; - int right_index = index_pair.second; - - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - keypoint_map[left_index][dist_left_ip] = 0; - keypoint_map[right_index][dist_right_ip] = 0; - } - } - - // Give all interest points in a given image a unique id - // And put them in a vector with the id corresponding to the interest point - std::vector>> keypoint_vec(cams.size()); - for (size_t cam_it = 0; cam_it < cams.size(); cam_it++) { - int count = 0; - for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { - ip_it->second = count; - count++; - keypoint_vec[cam_it].push_back(ip_it->first); - } - } - - openMVG::matching::PairWiseMatches match_map; - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias - - int left_index = index_pair.first; - int right_index = index_pair.second; - - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; - - std::vector mvg_matches; - - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - - int left_id = keypoint_map[left_index][dist_left_ip]; 
- int right_id = keypoint_map[right_index][dist_right_ip]; - mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); - } - match_map[index_pair] = mvg_matches; - } - - if (FLAGS_verbose) { - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair index_pair = it->first; - dense_map::MATCH_PAIR const& match_pair = it->second; - - int left_index = index_pair.first; - int right_index = index_pair.second; - - std::ostringstream oss1; - oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; - - std::string left_stem = oss1.str(); - std::string left_image = left_stem + ".jpg"; - - std::ostringstream oss2; - oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; - - std::string right_stem = oss2.str(); - std::string right_image = right_stem + ".jpg"; - - std::string match_file = left_stem + "__" + right_stem + ".match"; - - std::cout << "Writing: " << left_image << ' ' << right_image << ' ' - << match_file << std::endl; - dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); - } - } - - // TODO(oalexan1): De-allocate all data when no longer need as - // the matching logic takes a lot of time. - - // not needed anymore and take up a lot of RAM - matches.clear(); matches = dense_map::MATCH_MAP(); - keypoint_map.clear(); keypoint_map.shrink_to_fit(); - cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); - - std::vector > pid_to_cid_fid; - { - // Build tracks - // De-allocate these as soon as not needed to save memory - openMVG::tracks::TracksBuilder trackBuilder; - trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences - trackBuilder.Filter(); // Filter: Remove tracks that have conflict - // trackBuilder.ExportToStream(std::cout); - // Export tracks as a map (each entry is a sequence of imageId and featureIndex): - // {TrackIndex => {(imageIndex, featureIndex), ... 
,(imageIndex, featureIndex)} - openMVG::tracks::STLMAPTracks map_tracks; - trackBuilder.ExportToSTL(map_tracks); - match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed - trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it - - if (map_tracks.empty()) - LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; - - size_t num_elems = map_tracks.size(); - // Populate the filtered tracks - pid_to_cid_fid.clear(); - pid_to_cid_fid.resize(num_elems); - size_t curr_id = 0; - for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { - for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { - pid_to_cid_fid[curr_id][itr2->first] = itr2->second; - } - curr_id++; - } - } - - // Set up the variable blocks to optimize for BracketedCamError - std::vector bracketed_cam_block_sizes; - int num_focal_lengths = 1; - int num_distortion_params = 1; // will be overwritten - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_cam_block_sizes.push_back(num_focal_lengths); - bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - bracketed_cam_block_sizes.push_back(num_distortion_params); // will be modified later - - // Set up the variable blocks to optimize for RefCamError - std::vector ref_cam_block_sizes; - ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - ref_cam_block_sizes.push_back(num_focal_lengths); - ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - ref_cam_block_sizes.push_back(num_distortion_params); // will be modified later - - // Set up variable blocks to optimize for BracketedDepthError 
- std::vector bracketed_depth_block_sizes; - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(num_depth_params); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for BracketedDepthMeshError - std::vector bracketed_depth_mesh_block_sizes; - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(num_depth_params); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for RefDepthError - std::vector ref_depth_block_sizes; - ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_depth_block_sizes.push_back(num_depth_params); - ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // Set up the variable blocks to optimize for the mesh xyz - std::vector mesh_block_sizes; - mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // For a given fid = pid_to_cid_fid[pid][cid], the value - // pid_cid_fid_inlier[pid][cid][fid] will be non-zero only if this - // pixel is an inlier. Originally all pixels are inliers. Once an - // inlier becomes an outlier, it never becomes an inlier again. 
- std::vector>> pid_cid_fid_inlier; - if (!FLAGS_refiner_skip_filtering) { - pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - pid_cid_fid_inlier[pid][cid][fid] = 1; - } - } - } - - for (int pass = 0; pass < FLAGS_refiner_num_passes; pass++) { - std::cout << "\nOptimization pass " << pass + 1 << " / " << FLAGS_refiner_num_passes << "\n"; - - // The transforms from the world to all cameras must be updated - // given the current state of optimization - std::vector world_to_cam; - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); - - // Do multiview triangulation - std::vector xyz_vec(pid_to_cid_fid.size()); - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - std::vector focal_length_vec; - std::vector world_to_cam_vec; - std::vector pix_vec; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Triangulate inliers only - if (!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) - continue; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - - if (cams[cid].camera_type == ref_cam_type) { - // Flag as outliers pixels at the nav_cam boundary, if desired. This - // is especially important when the nav_cam uses the radtan - // model instead of fisheye. 
- Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); - int excl = FLAGS_nav_cam_num_exclude_boundary_pixels; - if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || - dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - continue; - } - } - - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); - world_to_cam_vec.push_back(world_to_cam[cid]); - pix_vec.push_back(undist_ip); - } - - if (!FLAGS_refiner_skip_filtering && pix_vec.size() < 2) { - // If after outlier filtering less than two rays are left, can't triangulate. - // Must set all features for this pid to outliers. - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - - // Nothing else to do - continue; - } - - // Triangulate n rays emanating from given undistorted and centered pixels - xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); - } - - if (pass == 0 && nav_cam_distortion_replacement.size() > 1) { - // At the first pass, right after triangulation is done with a - // given nav cam model, which presumably was pretty accurate, - // replace its distortion if desired, which we will then - // optimize. - std::cout << "Setting nav cam distortion to: " << nav_cam_distortion_replacement.transpose() - << ". 
Will proceed to optimize it.\n"; - cam_params[ref_cam_type].SetDistortion(nav_cam_distortion_replacement); - distortions[ref_cam_type] = cam_params[ref_cam_type].GetDistortion(); - } - - // For a given fid = pid_to_cid_fid[pid][cid], - // the value pid_cid_fid_to_residual[pid][cid][fid] will be the index in the array - // of residuals (look only at pixel residuals). This structure is populated only for - // inliers, so its size changes at each pass. - std::vector>> pid_cid_fid_to_residual; - if (!FLAGS_refiner_skip_filtering) pid_cid_fid_to_residual.resize(pid_to_cid_fid.size()); - - // Form the problem - ceres::Problem problem; - std::vector residual_names; - std::vector residual_scales; - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) - continue; - - int cam_type = cams[cid].camera_type; - int beg_ref_index = cams[cid].beg_ref_index; - int end_ref_index = cams[cid].end_ref_index; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - - if (cam_type == ref_cam_type) { - // The cost function of projecting in the ref cam. 
- ceres::CostFunction* ref_cost_function = - dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* ref_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // Remember the index of the residuals about to create - if (!FLAGS_refiner_skip_filtering) - pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); - - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (ref_cost_function, ref_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &xyz_vec[pid][0], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - } - - Eigen::Vector3d depth_xyz(0, 0, 0); - if (FLAGS_depth_tri_weight == 0 || - !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // The constraint that the depth points agree with triangulated points - ceres::CostFunction* ref_depth_cost_function - = dense_map::RefDepthError::Create(FLAGS_depth_tri_weight, - depth_xyz, - ref_depth_block_sizes); - - ceres::LossFunction* ref_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back("depth_tri_x_m"); - residual_names.push_back("depth_tri_y_m"); - residual_names.push_back("depth_tri_z_m"); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - problem.AddResidualBlock - (ref_depth_cost_function, - ref_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - 
&depth_to_image_noscale_vec[num_depth_params * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0]); - - if (!FLAGS_float_sparse_map) - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - problem.SetParameterBlockConstant - (&depth_to_image_noscale_vec[num_depth_params * cam_type]); - - } else { - // Other cameras, which need bracketing - ceres::CostFunction* bracketed_cost_function = - dense_map::BracketedCamError::Create(dist_ip, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* bracketed_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // Remember the index of the residuals about to create - if (!FLAGS_refiner_skip_filtering) - pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); - - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (bracketed_cost_function, bracketed_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if 
(!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } - - Eigen::Vector3d depth_xyz(0, 0, 0); - if (FLAGS_depth_tri_weight == 0 || - !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // Ensure that the depth points agree with triangulated points - ceres::CostFunction* bracketed_depth_cost_function - = dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, - depth_xyz, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_depth_block_sizes); - - ceres::LossFunction* bracketed_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back("depth_tri_x_m"); - residual_names.push_back("depth_tri_y_m"); - residual_names.push_back("depth_tri_z_m"); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - problem.AddResidualBlock - (bracketed_depth_cost_function, - bracketed_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type]); - - // TODO(oalexan1): This code repeats too much. Need to keep a hash - // of sparse map pointers. 
- if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } - if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - problem.SetParameterBlockConstant - (&depth_to_image_noscale_vec[num_depth_params * cam_type]); - } - } - } - - if (FLAGS_mesh != "") { - // Add the mesh constraint - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - Eigen::Vector3d mesh_xyz(0, 0, 0); - int num_intersections = 0; // Number of mesh intersections given the rays for this pid - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) - continue; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - // Intersect the ray with the mesh - bool have_mesh_intersection = false; - Eigen::Vector3d mesh_intersect(0.0, 0.0, 0.0); - have_mesh_intersection - = 
dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], - world_to_cam[cid], mesh, bvh_tree, - FLAGS_min_ray_dist, FLAGS_max_ray_dist, - // Output - mesh_intersect); - - if (have_mesh_intersection) { - mesh_xyz += mesh_intersect; - num_intersections += 1; - } - - if (FLAGS_depth_mesh_weight > 0 && have_mesh_intersection) { - // Try to make each mesh intersection agree with corresponding depth measurement, - // if it exists - Eigen::Vector3d depth_xyz(0, 0, 0); - if (dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) { - int cam_type = cams[cid].camera_type; - int beg_ref_index = cams[cid].beg_ref_index; - int end_ref_index = cams[cid].end_ref_index; - if (cam_type != ref_cam_type) { - // TODO(oalexan1): What to do when cam_type == ref_cam_type? - // Ensure that the depth points agree with mesh points - ceres::CostFunction* bracketed_depth_mesh_cost_function - = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, - depth_xyz, - mesh_intersect, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_depth_mesh_block_sizes); - - ceres::LossFunction* bracketed_depth_mesh_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back("depth_mesh_x_m"); - residual_names.push_back("depth_mesh_y_m"); - residual_names.push_back("depth_mesh_z_m"); - residual_scales.push_back(FLAGS_depth_mesh_weight); - residual_scales.push_back(FLAGS_depth_mesh_weight); - residual_scales.push_back(FLAGS_depth_mesh_weight); - problem.AddResidualBlock - (bracketed_depth_mesh_cost_function, - bracketed_depth_mesh_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type], - &depth_to_image_scales[cam_type], - &ref_to_cam_timestamp_offsets[cam_type]); - 
- // TODO(oalexan1): This code repeats too much. Need to keep a hash - // of sparse map pointers. - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } - if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - problem.SetParameterBlockConstant - (&depth_to_image_noscale_vec[num_depth_params * cam_type]); - } - } - } - } - - if (num_intersections >= 1 && FLAGS_mesh_tri_weight > 0) { - // Average the intersections of all rays with the mesh, and try to make - // the triangulated point agree with the mesh intersection - - mesh_xyz /= num_intersections; - - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_tri_weight); - - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &xyz_vec[pid][0]); - - residual_names.push_back("mesh_tri_x_m"); - residual_names.push_back("mesh_tri_y_m"); - residual_names.push_back("mesh_tri_z_m"); - - residual_scales.push_back(FLAGS_mesh_tri_weight); - residual_scales.push_back(FLAGS_mesh_tri_weight); - 
residual_scales.push_back(FLAGS_mesh_tri_weight); - } - } - } - - // See which intrinsics from which cam to float or keep fixed - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (intrinsics_to_float[cam_type].find("focal_length") == - intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; - problem.SetParameterBlockConstant(&focal_lengths[cam_type]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; - } - if (intrinsics_to_float[cam_type].find("optical_center") == - intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << - // std::endl; - problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; - } - if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&distortions[cam_type][0]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating distortion." 
<< std::endl; - } - } - - // Evaluate the residuals before optimization - double total_cost = 0.0; - std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "Initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Solve the problem - ceres::Solver::Options options; - ceres::Solver::Summary summary; - options.linear_solver_type = ceres::ITERATIVE_SCHUR; - options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread - options.max_num_iterations = FLAGS_num_iterations; - options.minimizer_progress_to_stdout = true; - options.gradient_tolerance = 1e-16; - options.function_tolerance = 1e-16; - options.parameter_tolerance = FLAGS_parameter_tolerance; - ceres::Solve(options, &problem, &summary); - - // The optimization is done. Right away copy the optimized states to where they belong to - // keep all data in sync. 
- - // Copy back the reference transforms - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::array_to_rigid_transform(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - // Copy back the optimized intrinsics - for (int it = 0; it < num_cam_types; it++) { - cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); - cam_params[it].SetOpticalOffset(optical_centers[it]); - cam_params[it].SetDistortion(distortions[it]); - } - - // If the nav cam did not get optimized, go back to the solution - // with two focal lengths, rather than the one with one focal length - // solved by this solver (as the average of the two). The two focal - // lengths are very similar, but it is not worth modifying the - // camera model we don't plan to optimize. - if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) - cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; - - // Copy back the optimized extrinsics - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::array_to_rigid_transform - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Copy back the depth to image transforms without scales - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (FLAGS_affine_depth_to_image) - dense_map::array_to_affine_transform - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type]); - else - dense_map::array_to_rigid_transform( - depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[num_depth_params * cam_type]); - } - - // Evaluate the residuals after optimization - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != 
residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "Final res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Flag outliers - if (!FLAGS_refiner_skip_filtering) { - // Before computing outliers by triangulation angle must recompute all the cameras, - // given the optimized parameters - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); - - // Must deal with outliers by triangulation angle before - // removing outliers by reprojection error, as the latter will - // exclude some rays which form the given triangulated points. - int num_outliers_by_angle = 0, num_total_features = 0; - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - // Find the largest angle among any two intersecting rays - double max_rays_angle = 0.0; - - for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); - cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { - int cid1 = cid_fid1->first; - int fid1 = cid_fid1->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; - - num_total_features++; - - Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); - Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; - ray1.normalize(); - - for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); - cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { - int cid2 = cid_fid2->first; - int fid2 = cid_fid2->second; - - // Look at each cid and next cids - if (cid2 <= cid1) - continue; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, 
fid2)) continue; - - Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); - Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; - ray2.normalize(); - - double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); - - if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; - - max_rays_angle = std::max(max_rays_angle, curr_angle); - } - } - - if (max_rays_angle >= FLAGS_refiner_min_angle) - continue; // This is a good triangulated point, with large angle of convergence - - // Flag as outliers all the features for this cid - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - - num_outliers_by_angle++; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - } - std::cout << std::setprecision(4) << "Removed " << num_outliers_by_angle - << " outlier features with small angle of convergence, out of " - << num_total_features << " (" - << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; - - int num_outliers_reproj = 0; - num_total_features = 0; // reusing this variable - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - - num_total_features++; - - // Find the pixel residuals - size_t residual_index = dense_map::getMapValue(pid_cid_fid_to_residual, pid, cid, fid); - if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; - - double res_x = residuals[residual_index + 0]; - double res_y = residuals[residual_index + 1]; - // NaN values will never be inliers if the comparison is set as below - bool 
is_good = (Eigen::Vector2d(res_x, res_y).norm() <= FLAGS_max_reprojection_error); - if (!is_good) { - num_outliers_reproj++; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - } - } - std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj - << " outlier features using reprojection error, out of " << num_total_features - << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; - } - } - - // Update the optimized depth to image (for haz cam only) - int cam_type = 1; // haz cam - haz_cam_depth_to_image_scale = depth_to_image_scales[cam_type]; - haz_cam_depth_to_image_transform = depth_to_image_noscale[cam_type]; - haz_cam_depth_to_image_transform.linear() *= haz_cam_depth_to_image_scale; - - dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", - cam_params, ref_to_cam_trans, - ref_to_cam_timestamp_offsets, - haz_cam_depth_to_image_transform); - - if (FLAGS_num_iterations > 0 && - (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "")) { - std::cout << "Either the sparse map intrinsics or cameras got modified. " - << "The map must be rebuilt." 
<< std::endl; - - dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data - cam_params[ref_cam_type], sparse_map); - } - - std::cout << "Writing: " << FLAGS_output_map << std::endl; - sparse_map->Save(FLAGS_output_map); - - if (FLAGS_out_texture_dir != "") { - if (FLAGS_mesh == "") - LOG(FATAL) << "Cannot project camera images onto a mesh if a mesh was not provided.\n"; - - // The transform from the world to every camera - std::vector world_to_cam; - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); - meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, ref_cam_type, - FLAGS_nav_cam_num_exclude_boundary_pixels, FLAGS_out_texture_dir); - } - - return 0; -} // NOLINT - diff --git a/dense_map/geometry_mapper/tools/camera_refiner_old.cc b/dense_map/geometry_mapper/tools/camera_refiner_old.cc new file mode 100644 index 00000000..36e8520f --- /dev/null +++ b/dense_map/geometry_mapper/tools/camera_refiner_old.cc @@ -0,0 +1,2268 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +// This is an old version of camera_refiner which could only match each sci and haz +// image to its two bracketing nav_cam images. It is still functional, hence it is kept, +// but the new camera_refiner performs better. It used to be invoked like this: +// +// $ISAAC_WS/devel/lib/geometry_mapper/camera_refiner \ +// --ros_bag mybag.bag --sparse_map mymap.map \ +// --num_iterations 20 --bracket_len 0.6 \ +// --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ +// --output_map output_map.map \ +// --fix_map --skip_registration --float_scale \ +// --timestamp_interpolation --robust_threshold 3 \ +// --sci_cam_intrinsics_to_float \ +// 'focal_length optical_center distortion' \ +// --mesh mesh.ply --mesh_weight 25 \ +// --mesh_robust_threshold 3 + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and full-resolution sci_cam data."); + +DEFINE_string(sparse_map, "", + "A registered SURF sparse map made with some of the ROS bag data, " + "and including nav cam images closely bracketing the sci cam images."); + +DEFINE_string(output_map, "", "Output file containing the updated map."); + +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); + +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); + +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); + +DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag 
file."); + +DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); + +DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); + +DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, + "Use depth haz cam clouds that are within this distance in " + "time from the nearest haz cam intensity image."); + +DEFINE_double(robust_threshold, 3.0, + "Pixel errors much larger than this will be exponentially attenuated " + "to affect less the cost function."); + +DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by less than this."); + +DEFINE_double(bracket_len, 2.0, + "Lookup sci and haz cam images only between consecutive nav cam images " + "whose distance in time is no more than this (in seconds), after adjusting " + "for the timestamp offset between these cameras. It is assumed the robot " + "moves slowly and uniformly during this time."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); + +DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration."); + +DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration."); + +DEFINE_bool(timestamp_interpolation, false, + "If true, interpolate between " + "timestamps. May give better results if the robot is known to move " + "uniformly, and perhaps less so for stop-and-go robot motion."); + +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. Must be " + "a file with one timestamp per line."); + +DEFINE_bool(skip_registration, false, + "If true, do not re-register the optimized map. " + "Then the hugin and xyz file options need not be provided. 
" + "This may result in the scale not being correct if the sparse map is not fixed."); + +DEFINE_bool(opt_map_only, false, "If to optimize only the map and not the camera params."); + +DEFINE_bool(fix_map, false, "Do not optimize the sparse map, hence only the camera params."); + +DEFINE_bool(float_scale, false, + "If to optimize the scale of the clouds (use it if the " + "sparse map is kept fixed)."); + +DEFINE_string(sci_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, + std::numeric_limits::quiet_NaN(), + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " + "file with this value."); + +DEFINE_string(mesh, "", + "Refine the sci cam so that the sci cam texture agrees with the nav cam " + "texture when projected on this mesh."); + +DEFINE_double(mesh_weight, 25.0, + "A larger value will give more weight to the mesh constraint. " + "The mesh residuals printed at the end can be used to examine " + "the effect of this weight."); + +DEFINE_double(mesh_robust_threshold, 3.0, + "A larger value will try harder to minimize large divergences from " + "the mesh (but note that some of those may be outliers)."); + +DEFINE_bool(verbose, false, + "Print the residuals and save the images and match files." + "Stereo Pipeline's viewer can be used for visualizing these."); + +namespace dense_map { + +// TODO(oalexan1): Store separately matches which end up being +// squashed in a pid_cid_to_fid. 
+ +ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { + // Convert to lower-case + std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower); + + ceres::LossFunction* loss_function = NULL; + if (cost_fun == "l2") + loss_function = NULL; + else if (cost_fun == "huber") + loss_function = new ceres::HuberLoss(th); + else if (cost_fun == "cauchy") + loss_function = new ceres::CauchyLoss(th); + else if (cost_fun == "l1") + loss_function = new ceres::SoftLOneLoss(th); + else + LOG(FATAL) << "Unknown cost function: " + cost_fun; + + return loss_function; +} + +// An error function minimizing the error of projection of haz cam +// depth points to the haz cam image. +struct DepthToHazError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DepthToHazError(Eigen::Vector2d const& haz_pix, Eigen::Vector3d const& depth_xyz, std::vector const& block_sizes, + camera::CameraParameters const& haz_cam_params) + : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes), m_haz_cam_params(haz_cam_params) { + // Sanity check. + if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. 
+ bool operator()(double const* const* parameters, double* residuals) const { + // Populate the intrinsics + + // The current transform from the depth point cloud to the image + Eigen::Affine3d depth_to_image; + array_to_rigid_transform(depth_to_image, parameters[0]); + + // Apply the scale + double depth_to_image_scale = parameters[1][0]; + depth_to_image.linear() *= depth_to_image_scale; + + // Convert from depth cloud coordinates to haz cam coordinates + Eigen::Vector3d X = depth_to_image * m_depth_xyz; + + // Project into the camera + Eigen::Vector2d undist_pix = m_haz_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + // Eigen::Vector2d dist_pix; + // m_haz_cam_params.Convert(undist_pix, &dist_pix); + + // Compute the residuals + residuals[0] = undist_pix[0] - m_haz_pix[0]; + residuals[1] = undist_pix[1] - m_haz_pix[1]; + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, + std::vector const& block_sizes, + camera::CameraParameters const& haz_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction( + new DepthToHazError(nav_pix, depth_xyz, block_sizes, haz_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_haz_pix; // The pixel observation + Eigen::Vector3d m_depth_xyz; // The measured position + std::vector m_block_sizes; + camera::CameraParameters m_haz_cam_params; +}; // End class DepthToHazError + +// An error function minimizing the error of projection of a point in the nav cam +// image. 
Both the point and the camera pose are variables of optimization. +struct NavError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + NavError(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, + camera::CameraParameters const& nav_cam_params) + : m_nav_pix(nav_pix), m_block_sizes(block_sizes), m_nav_cam_params(nav_cam_params) { + // Sanity check + if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_XYZ_PARAMS) { + LOG(FATAL) << "NavError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. + bool operator()(double const* const* parameters, double* residuals) const { + // Populate the intrinsics + + Eigen::Affine3d world_to_nav_cam; + array_to_rigid_transform(world_to_nav_cam, parameters[0]); + + Eigen::Vector3d xyz; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) xyz[it] = parameters[1][it]; + + // Convert to camera coordinates + Eigen::Vector3d X = world_to_nav_cam * xyz; + + // Project into the camera + Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + // Eigen::Vector2d dist_pix; + // m_nav_cam_params.Convert(undist_pix, &dist_pix); + + // Compute the residuals + residuals[0] = undist_pix[0] - m_nav_pix[0]; + residuals[1] = undist_pix[1] - m_nav_pix[1]; + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, + camera::CameraParameters const& nav_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction(new NavError(nav_pix, block_sizes, nav_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. 
+ for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_nav_pix; // The pixel observation + std::vector m_block_sizes; + camera::CameraParameters m_nav_cam_params; +}; // End class NavError + +// An error function minimizing the error of transforming depth points +// first to haz cam image coordinates, then to nav cam coordinates +// (via time interpolation) then to world coordinates, then by +// projecting into the nav cam image having a match point with the +// original haz cam image. This is a little tricky to follow, because +// each haz cam image is bound in time by two nav cam images. +struct DepthToNavError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DepthToNavError(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, + double alpha, // used for interpolation + bool match_left, std::vector const& block_sizes, camera::CameraParameters const& nav_cam_params) + : m_nav_pix(nav_pix), + m_depth_xyz(depth_xyz), + m_alpha(alpha), + m_match_left(match_left), + m_block_sizes(block_sizes), + m_nav_cam_params(nav_cam_params) { + // Sanity check. + if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+ bool operator()(double const* const* parameters, double* residuals) const { + // As mentioned before, we have to deal with four transforms + + Eigen::Affine3d left_nav_trans; + array_to_rigid_transform(left_nav_trans, parameters[0]); + + Eigen::Affine3d right_nav_trans; + array_to_rigid_transform(right_nav_trans, parameters[1]); + + Eigen::Affine3d hazcam_to_navcam_trans; + array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + + // The haz cam depth to image transform, which has a fixed scale + Eigen::Affine3d hazcam_depth_to_image_trans; + array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[3]); + double depth_to_image_scale = parameters[4][0]; + hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; + + // Convert from depth cloud coordinates to haz cam coordinates + Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; + + // Convert to nav cam coordinates + X = hazcam_to_navcam_trans * X; + + // The haz cam to nav cam transform at the haz cam time is obtained + // by interpolation in time + Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); + + // Convert to world coordinates + X = interp_world_to_nav_trans.inverse() * X; + + // Transform to either the left or right nav camera coordinates, + // depending on for which one we managed to find a match with he + // haz cam image + if (m_match_left) { + X = left_nav_trans * X; + } else { + X = right_nav_trans * X; + } + + // Project into the image + Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + // Eigen::Vector2d dist_pix; + // m_nav_cam_params.Convert(undist_pix, &dist_pix); + + // Compute the residuals + residuals[0] = undist_pix[0] - m_nav_pix[0]; + residuals[1] = undist_pix[1] - m_nav_pix[1]; + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, double alpha, + bool match_left, std::vector const& block_sizes, + camera::CameraParameters const& nav_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction( + new DepthToNavError(nav_pix, depth_xyz, alpha, match_left, block_sizes, nav_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_nav_pix; // The nav cam pixel observation + Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera + double m_alpha; + bool m_match_left; + std::vector m_block_sizes; + camera::CameraParameters m_nav_cam_params; +}; // End class DepthToNavError + +// An error function minimizing the error of transforming depth points at haz cam +// time through enough coordinate systems until it can project +// into the sci cam at the sci cam time +struct DepthToSciError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DepthToSciError(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, double alpha_haz, + double alpha_sci, // used for interpolation + std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) + : m_dist_sci_pix(dist_sci_pix), + m_depth_xyz(depth_xyz), + m_alpha_haz(alpha_haz), + m_alpha_sci(alpha_sci), + m_block_sizes(block_sizes), + m_sci_cam_params(sci_cam_params), + m_num_focal_lengths(1) { + // Sanity check. 
+ if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || + m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || + m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { + LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Make a deep copy which we will modify + camera::CameraParameters sci_cam_params = m_sci_cam_params; + + // We have to deal with four transforms + Eigen::Affine3d left_nav_trans; + array_to_rigid_transform(left_nav_trans, parameters[0]); + + Eigen::Affine3d right_nav_trans; + array_to_rigid_transform(right_nav_trans, parameters[1]); + + Eigen::Affine3d hazcam_to_navcam_trans; + array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + + Eigen::Affine3d scicam_to_hazcam_trans; + array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); + + // The haz cam depth to image transform, which has a fixed scale + Eigen::Affine3d hazcam_depth_to_image_trans; + array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[4]); + double depth_to_image_scale = parameters[5][0]; + hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; + + // Intrinsics, including a single focal length + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[6][0], parameters[6][0]); + Eigen::Vector2d optical_center(parameters[7][0], parameters[7][1]); + Eigen::VectorXd distortion(m_block_sizes[8]); + for (int i = 0; i < m_block_sizes[8]; i++) distortion[i] = parameters[8][i]; + sci_cam_params.SetFocalLength(focal_vector); + sci_cam_params.SetOpticalOffset(optical_center); + 
sci_cam_params.SetDistortion(distortion); + + // Convert from depth cloud coordinates to haz cam coordinates + Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; + + // Convert to nav cam coordinates at haz cam time + X = hazcam_to_navcam_trans * X; + + // World to navcam at haz time + Eigen::Affine3d interp_world_to_nav_trans_haz_time = + dense_map::linearInterp(m_alpha_haz, left_nav_trans, right_nav_trans); + + // World to nav time at sci time + Eigen::Affine3d interp_world_to_nav_trans_sci_time = + dense_map::linearInterp(m_alpha_sci, left_nav_trans, right_nav_trans); + + // Convert to world coordinates + X = interp_world_to_nav_trans_haz_time.inverse() * X; + + // Convert to nav coordinates at sci cam time + X = interp_world_to_nav_trans_sci_time * X; + + // Convert to sci cam coordinates + X = scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * X; + + // Convert to sci cam pix + Eigen::Vector2d undist_pix = sci_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + + // Apply distortion + Eigen::Vector2d comp_dist_sci_pix; + sci_cam_params.Convert(undist_pix, &comp_dist_sci_pix); + + // Compute the residuals + residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; + residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, + double alpha_haz, double alpha_sci, std::vector const& block_sizes, + camera::CameraParameters const& sci_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction( + new DepthToSciError(dist_sci_pix, depth_xyz, alpha_haz, alpha_sci, block_sizes, sci_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. 
+ for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation + Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera + double m_alpha_haz; + double m_alpha_sci; + std::vector m_block_sizes; + camera::CameraParameters m_sci_cam_params; + int m_num_focal_lengths; +}; // End class DepthToSciError + +// An error function projecting an xyz point in the sci cam +// bounded by two nav cams. +struct SciError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + SciError(Eigen::Vector2d const& dist_sci_pix, + double alpha, // used for interpolation + std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) + : m_dist_sci_pix(dist_sci_pix), + m_alpha(alpha), + m_block_sizes(block_sizes), + m_sci_cam_params(sci_cam_params), + m_num_focal_lengths(1) { + // Sanity check. + if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_XYZ_PARAMS || m_block_sizes[5] != m_num_focal_lengths || + m_block_sizes[6] != NUM_OPT_CTR_PARAMS || m_block_sizes[7] != m_sci_cam_params.GetDistortion().size()) { + LOG(FATAL) << "SciError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+ bool operator()(double const* const* parameters, double* residuals) const { + // Make a deep copy which we will modify + camera::CameraParameters sci_cam_params = m_sci_cam_params; + + // We have to deal with four transforms + + Eigen::Affine3d left_nav_trans; + array_to_rigid_transform(left_nav_trans, parameters[0]); + + Eigen::Affine3d right_nav_trans; + array_to_rigid_transform(right_nav_trans, parameters[1]); + + Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); + + Eigen::Affine3d hazcam_to_navcam_trans; + array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + + Eigen::Affine3d scicam_to_hazcam_trans; + array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); + + Eigen::Vector3d X; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[4][it]; + + // Intrinsics, including a single focal length + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); + Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); + Eigen::VectorXd distortion(m_block_sizes[7]); + for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; + sci_cam_params.SetFocalLength(focal_vector); + sci_cam_params.SetOpticalOffset(optical_center); + sci_cam_params.SetDistortion(distortion); + + // Find the sci cam to world transform + Eigen::Affine3d interp_world_to_sci_trans = + scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * interp_world_to_nav_trans; + + // Project into the sci cam + Eigen::Vector3d sciX = interp_world_to_sci_trans * X; + Eigen::Vector2d undist_sci_pix = sci_cam_params.GetFocalVector().cwiseProduct(sciX.hnormalized()); + + // Convert to distorted pixel + Eigen::Vector2d comp_dist_sci_pix; + sci_cam_params.Convert(undist_sci_pix, &comp_dist_sci_pix); + + // Compute the residuals + residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; + residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; + + return 
true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, double alpha, + std::vector const& block_sizes, + camera::CameraParameters const& sci_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction( + new SciError(dist_sci_pix, alpha, block_sizes, sci_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation + double m_alpha; + std::vector m_block_sizes; + camera::CameraParameters m_sci_cam_params; + int m_num_focal_lengths; +}; // End class SciError + +// An error function minimizing a weight times the distance from a +// variable xyz point to a fixed reference xyz point. +struct XYZError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + XYZError(Eigen::Vector3d const& ref_xyz, std::vector const& block_sizes, double weight) + : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) { + // Sanity check + if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS) + LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n"; + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. + // TODO(oalexan1): May want to use the analytical Ceres cost function + bool operator()(double const* const* parameters, double* residuals) const { + // Compute the residuals + for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz, + std::vector const& block_sizes, + double weight) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + + // The residual size is always the same + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector3d m_ref_xyz; // reference xyz + std::vector m_block_sizes; + double m_weight; +}; // End class XYZError + +enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; + +// Calculate the rmse residual for each residual type. +void calc_median_residuals(std::vector const& residuals, + std::vector const& residual_names, + std::string const& tag) { + size_t num = residuals.size(); + + if (num != residual_names.size()) + LOG(FATAL) << "There must be as many residuals as residual names."; + + std::map > stats; + for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]] = std::vector(); // initialize + + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]].push_back(std::abs(residuals[it])); + + std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + for (auto it = stats.begin(); it != stats.end(); it++) { + std::string const& name = it->first; + std::vector vals = stats[name]; // make a copy + std::sort(vals.begin(), vals.end()); + + int len = vals.size(); + + int it1 = static_cast(0.25 * len); + int it2 = static_cast(0.5 * len); + int it3 = static_cast(0.75 * len); + + if (len = 0) + std::cout << name << ": " + << "none" << std::endl; + else + std::cout << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3] << std::endl; + } +} + + // Intersect ray with mesh. Return true on success. 
+ bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection) { + // Initialize the output + intersection = Eigen::Vector3d(0.0, 0.0, 0.0); + + // Ray from camera going through the pixel + Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], + undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); + cam_ray.normalize(); + + Eigen::Affine3d cam_to_world = world_to_cam.inverse(); + Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; + Eigen::Vector3d cam_ctr = cam_to_world.translation(); + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + + bvh_ray.tmin = min_ray_dist; + bvh_ray.tmax = max_ray_dist; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (bvh_tree->intersect(bvh_ray, &hit)) { + double cam_to_mesh_dist = hit.t; + intersection = cam_ctr + cam_to_mesh_dist * world_ray; + return true; + } + + return false; + } + + // Prevent the linter from messing up with the beautiful formatting below + void add_haz_nav_cost // NOLINT + (// Inputs // NOLINT + int haz_it, int nav_it, int nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, // NOLINT + std::vector const & haz_cam_intensity_timestamps, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::map const & haz_cam_to_left_nav_cam_index, // NOLINT + std::map const & haz_cam_to_right_nav_cam_index, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & haz_cam_params, // NOLINT + std::vector const & depth_to_nav_block_sizes, // NOLINT + std::vector const & 
depth_to_haz_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + // Outputs // NOLINT + std::vector & residual_names, // NOLINT + double & hazcam_depth_to_image_scale, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & hazcam_depth_to_image_vec, // NOLINT + ceres::Problem & problem) { // NOLINT + // Figure out the two nav cam indices bounding the current haz cam + // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp + // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset. + bool match_left = false; + if (haz_cam_intensity_timestamps[haz_it] >= + sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { + match_left = true; + } else { + match_left = false; // match right then + } + + auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); + auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); + if (left_it == haz_cam_to_left_nav_cam_index.end() || + right_it == haz_cam_to_right_nav_cam_index.end()) + LOG(FATAL) << "Book-keeping error in add_haz_nav_cost."; + + int left_nav_it = left_it->second; + int right_nav_it = right_it->second; + + if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || + haz_it >= static_cast(haz_cam_intensity_timestamps.size())) + LOG(FATAL) << "Book-keeping error 2."; + + // Left and right nav cam image time, in haz_cam's time measurement + double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + + navcam_to_hazcam_timestamp_offset; + double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + + navcam_to_hazcam_timestamp_offset; + double haz_time = haz_cam_intensity_timestamps[haz_it]; + + bool good = (left_time <= haz_time && haz_time < right_time); + + if (!good) LOG(FATAL) << "Book-keeping error 3."; + + // The current nav it better be either the left or right 
kind + if (nav_it != left_nav_it && nav_it != right_nav_it) LOG(FATAL) << "Book-keeping error 4."; + + // Find the transform from the world to nav cam at the haz cam time + Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; + Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; + double alpha = (haz_time - left_time) / (right_time - left_time); + if (right_time == left_time) alpha = 0.0; // handle division by zero + + if (!FLAGS_timestamp_interpolation) alpha = round(alpha); + + Eigen::Affine3d world_to_nav_trans_at_haz_time + = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + + std::vector const& haz_ip_vec = match_pair.first; // alias + std::vector const& nav_ip_vec = match_pair.second; // alias + + cv::Mat const& depth_cloud = depth_clouds[haz_it]; // alias + + for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { + // Find the haz cam depth measurement. Use nearest neighbor interpolation + // to look into the depth cloud. 
+ int col = round(haz_ip_vec[ip_it].x); + int row = round(haz_ip_vec[ip_it].y); + + if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) + LOG(FATAL) << "Book-keeping error 5."; + + // Skip any point that goes out of bounds due to rounding + if (col == depth_cloud.cols || row == depth_cloud.rows) continue; + + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; + + // Convert to Eigen + Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + + Eigen::Vector2d undist_haz_ip; + Eigen::Vector2d undist_nav_ip; + { + // Make sure we don't use the distorted pixels from now on + Eigen::Vector2d haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); + Eigen::Vector2d nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); + haz_cam_params.Convert(haz_ip, &undist_haz_ip); + nav_cam_params.Convert(nav_ip, &undist_nav_ip); + } + + // Ensure the depth point projects well into the haz cam interest point + ceres::CostFunction* depth_to_haz_cost_function = + dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, + haz_cam_params); + ceres::LossFunction* depth_to_haz_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, + &hazcam_depth_to_image_vec[0], + &hazcam_depth_to_image_scale); + residual_names.push_back("haznavhaz1"); + residual_names.push_back("haznavhaz2"); + + // Ensure that the depth points projects well in the nav cam interest point + ceres::CostFunction* depth_to_nav_cost_function + = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left, + depth_to_nav_block_sizes, nav_cam_params); + ceres::LossFunction* depth_to_nav_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + int left_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it); + int 
right_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it); + problem.AddResidualBlock(depth_to_nav_cost_function, depth_to_nav_loss_function, + &nav_cam_vec[left_nav_index], + &nav_cam_vec[right_nav_index], + &hazcam_to_navcam_vec[0], &hazcam_depth_to_image_vec[0], + &hazcam_depth_to_image_scale); + residual_names.push_back("haznavnav1"); + residual_names.push_back("haznavnav2"); + + if (FLAGS_fix_map) { + problem.SetParameterBlockConstant(&nav_cam_vec[left_nav_index]); + problem.SetParameterBlockConstant(&nav_cam_vec[right_nav_index]); + } + } + + return; + } + + // Prevent the linter from messing up with the beautiful formatting below + void add_haz_sci_cost(// Inputs // NOLINT + int haz_it, int sci_it, int nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + double scicam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, // NOLINT + std::vector const & haz_cam_intensity_timestamps, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::vector const & sci_cam_timestamps, // NOLINT + std::map const & haz_cam_to_left_nav_cam_index, // NOLINT + std::map const & haz_cam_to_right_nav_cam_index, // NOLINT + std::map const & sci_cam_to_left_nav_cam_index, // NOLINT + std::map const & sci_cam_to_right_nav_cam_index, // NOLINT + camera::CameraParameters const & sci_cam_params, // NOLINT + camera::CameraParameters const & haz_cam_params, // NOLINT + std::vector const & depth_to_sci_block_sizes, // NOLINT + std::vector const & depth_to_haz_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + // Outputs // NOLINT + std::vector & residual_names, // NOLINT + double & hazcam_depth_to_image_scale, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & scicam_to_hazcam_vec, // NOLINT + std::vector & hazcam_depth_to_image_vec, // NOLINT + Eigen::Vector2d & sci_cam_focal_vector, // 
NOLINT + Eigen::Vector2d & sci_cam_optical_center, // NOLINT + Eigen::VectorXd & sci_cam_distortion, // NOLINT + ceres::Problem & problem) { // NOLINT + auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); + auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); + if (left_it == haz_cam_to_left_nav_cam_index.end() || + right_it == haz_cam_to_right_nav_cam_index.end()) + LOG(FATAL) << "Book-keeping error 1 in add_haz_sci_cost."; + + int left_nav_it = left_it->second; + int right_nav_it = right_it->second; + + if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || + haz_it >= static_cast(haz_cam_intensity_timestamps.size())) + LOG(FATAL) << "Book-keeping error 2 in add_haz_sci_cost."; + + // The haz and sci images must be bracketed by the same two nav images + { + auto left_it2 = sci_cam_to_left_nav_cam_index.find(sci_it); + auto right_it2 = sci_cam_to_right_nav_cam_index.find(sci_it); + if (left_it2 == sci_cam_to_left_nav_cam_index.end() || + right_it2 == sci_cam_to_right_nav_cam_index.end()) + LOG(FATAL) << "Book-keeping error 3 in add_haz_sci_cost."; + + int left_nav_it2 = left_it2->second; + int right_nav_it2 = right_it2->second; + + if (left_nav_it2 != left_nav_it || right_nav_it2 != right_nav_it) + LOG(FATAL) << "Book-keeping error 4 in add_haz_sci_cost."; + } + // Left and right nav cam image time, in haz_cam's time measurement + double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + + navcam_to_hazcam_timestamp_offset; + double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + + navcam_to_hazcam_timestamp_offset; + + // Find the haz and sci time and convert them to haz cam's clock + double haz_time = haz_cam_intensity_timestamps[haz_it]; + double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; + + bool good1 = (left_time <= haz_time && haz_time < right_time); + if (!good1) LOG(FATAL) << "Book-keeping error 5 in add_haz_sci_cost."; + + bool good2 = (left_time <= 
sci_time && sci_time < right_time); + if (!good2) LOG(FATAL) << "Book-keeping error 6 in add_haz_sci_cost."; + + // Find the transform from the world to nav cam at the haz cam time + Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; + Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; + + double alpha_haz = (haz_time - left_time) / (right_time - left_time); + if (right_time == left_time) alpha_haz = 0.0; // handle division by zero + + if (!FLAGS_timestamp_interpolation) alpha_haz = round(alpha_haz); + + Eigen::Affine3d world_to_nav_trans_at_haz_time = + dense_map::linearInterp(alpha_haz, left_nav_trans, right_nav_trans); + + double alpha_sci = (sci_time - left_time) / (right_time - left_time); + if (right_time == left_time) alpha_sci = 0.0; // handle division by zero + + if (!FLAGS_timestamp_interpolation) + alpha_sci = round(alpha_sci); + + Eigen::Affine3d world_to_nav_trans_at_sci_time = + dense_map::linearInterp(alpha_sci, left_nav_trans, right_nav_trans); + + std::vector const& haz_ip_vec = match_pair.first; + std::vector const& sci_ip_vec = match_pair.second; + + cv::Mat const& depth_cloud = depth_clouds[haz_it]; + + for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { + // Find the haz cam depth measurement. Use nearest neighbor interpolation + // to look into the depth cloud. + int col = round(haz_ip_vec[ip_it].x); + int row = round(haz_ip_vec[ip_it].y); + + if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) + LOG(FATAL) << "Book-keeping error 7 in add_haz_sci_cost."; + + // Skip any point that goes out of bounds due to rounding + if (col == depth_cloud.cols || row == depth_cloud.rows) continue; + + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; + + // Convert to Eigen + Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + + // Apply undistortion. 
Must take great care to not mix up + // distorted and undistorted pixels. + Eigen::Vector2d dist_haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); + Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); + Eigen::Vector2d undist_haz_ip; + Eigen::Vector2d undist_sci_ip; + haz_cam_params.Convert(dist_haz_ip, &undist_haz_ip); + sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); + + // Ensure the depth point projects well into the haz cam interest point + ceres::CostFunction* depth_to_haz_cost_function = + dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, haz_cam_params); + ceres::LossFunction* depth_to_haz_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, &hazcam_depth_to_image_vec[0], + &hazcam_depth_to_image_scale); + residual_names.push_back("hazscihaz1"); + residual_names.push_back("hazscihaz2"); + + // Ensure that the depth points projects well in the sci cam interest point. + // Note how we pass a distorted sci cam pix, as in that error function we will + // take the difference of distorted pixels. 
+ ceres::CostFunction* depth_to_sci_cost_function + = dense_map::DepthToSciError::Create(dist_sci_ip, depth_xyz, alpha_haz, + alpha_sci, depth_to_sci_block_sizes, sci_cam_params); + ceres::LossFunction* depth_to_sci_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + problem.AddResidualBlock(depth_to_sci_cost_function, depth_to_sci_loss_function, + &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], + &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], + &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], &hazcam_depth_to_image_vec[0], + &hazcam_depth_to_image_scale, &sci_cam_focal_vector[0], &sci_cam_optical_center[0], + &sci_cam_distortion[0]); + + residual_names.push_back("hazscisci1"); + residual_names.push_back("hazscisci2"); + + if (FLAGS_fix_map) { + problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); + problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); + } + } + + return; + } + + // Prevent the linter from messing up with the beautiful formatting below + void add_nav_sci_cost + (// Inputs // NOLINT + int nav_it, int sci_it, int nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + double scicam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::vector const & sci_cam_timestamps, // NOLINT + std::map const & sci_cam_to_left_nav_cam_index, // NOLINT + std::map const & sci_cam_to_right_nav_cam_index, // NOLINT + Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT + Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & sci_cam_params, // NOLINT + std::vector const & nav_block_sizes, // NOLINT + std::vector const & sci_block_sizes, // NOLINT + std::vector const & mesh_block_sizes, // 
NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + mve::TriangleMesh::Ptr const & mesh, // NOLINT + std::shared_ptr const & bvh_tree, // NOLINT + // Outputs // NOLINT + int & nav_sci_xyz_count, // NOLINT + std::vector & residual_names, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & scicam_to_hazcam_vec, // NOLINT + Eigen::Vector2d & sci_cam_focal_vector, // NOLINT + Eigen::Vector2d & sci_cam_optical_center, // NOLINT + Eigen::VectorXd & sci_cam_distortion, // NOLINT + std::vector & initial_nav_sci_xyz, // NOLINT + std::vector & nav_sci_xyz, // NOLINT + ceres::Problem & problem) { // NOLINT + auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it); + auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it); + if (left_it == sci_cam_to_left_nav_cam_index.end() || + right_it == sci_cam_to_right_nav_cam_index.end()) + LOG(FATAL) << "Book-keeping error 1 in add_sci_sci_cost."; + + int left_nav_it = left_it->second; + int right_nav_it = right_it->second; + + if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size())) + LOG(FATAL) << "Book-keeping error 1 in add_nav_sci_cost."; + + // Figure out the two nav cam indices bounding the current sci cam + bool match_left = false; + if (sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset >= + sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { + match_left = true; + } else { + match_left = false; // match right then + } + + // Left and right nav cam image time, and sci cam time, in haz_cam's time measurement + double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + navcam_to_hazcam_timestamp_offset; + double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + navcam_to_hazcam_timestamp_offset; + double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; + + bool good = (left_time <= sci_time 
&& sci_time < right_time); + + if (!good) LOG(FATAL) << "Book-keeping 2 in add_nav_sci_cost."; + + // Find the transform from the world to nav cam at the sci cam time + Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; + Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; + double alpha = (sci_time - left_time) / (right_time - left_time); + if (right_time == left_time) alpha = 0.0; // handle division by zero + + if (!FLAGS_timestamp_interpolation) alpha = round(alpha); + + Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + + // Find the sci cam to world transform + Eigen::Affine3d interp_world_to_sci_trans = + scicam_to_hazcam_aff_trans.inverse() * + hazcam_to_navcam_aff_trans.inverse() * + interp_world_to_nav_trans; + + Eigen::Affine3d world_to_nav_trans; + if (match_left) { + world_to_nav_trans = left_nav_trans; + } else { + world_to_nav_trans = right_nav_trans; + } + + std::vector const& sci_ip_vec = match_pair.first; + std::vector const& nav_ip_vec = match_pair.second; + + for (size_t ip_it = 0; ip_it < sci_ip_vec.size(); ip_it++) { + Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); + Eigen::Vector2d dist_nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); + Eigen::Vector2d undist_nav_ip; + Eigen::Vector2d undist_sci_ip; + nav_cam_params.Convert(dist_nav_ip, &undist_nav_ip); + sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); + + Eigen::Vector3d X = + dense_map::TriangulatePair(sci_cam_params.GetFocalLength(), nav_cam_params.GetFocalLength(), + interp_world_to_sci_trans, world_to_nav_trans, undist_sci_ip, undist_nav_ip); + + bool have_mesh_intersection = false; + if (FLAGS_mesh != "") { + // Try to make the intersection point be on the mesh and the nav cam ray + // to make the sci cam to conform to that. 
+ // TODO(oalexan1): Think more of the range of the ray below + double min_ray_dist = 0.0; + double max_ray_dist = 10.0; + Eigen::Vector3d intersection(0.0, 0.0, 0.0); + have_mesh_intersection + = dense_map::ray_mesh_intersect(undist_nav_ip, nav_cam_params, + world_to_nav_trans, mesh, bvh_tree, + min_ray_dist, max_ray_dist, + // Output + intersection); + + // Overwrite the triangulation result above with the intersection + if (have_mesh_intersection) X = intersection; + } + + // Record the triangulated positions. These will be optimized. + for (int i = 0; i < dense_map::NUM_XYZ_PARAMS; i++) + nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count + i] = X[i]; + + // A copy of the triangulated positions which won't be optimized. + initial_nav_sci_xyz[nav_sci_xyz_count] = X; + + // The cost function of projecting in the sci cam. Note that we use dist_sci_ip, + // as in the cost function below we will do differences of distorted sci cam pixels. + ceres::CostFunction* sci_cost_function = + dense_map::SciError::Create(dist_sci_ip, alpha, sci_block_sizes, sci_cam_params); + ceres::LossFunction* sci_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + problem.AddResidualBlock(sci_cost_function, sci_loss_function, + &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], + &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], + &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], + &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count], &sci_cam_focal_vector[0], + &sci_cam_optical_center[0], &sci_cam_distortion[0]); + + residual_names.push_back("navscisci1"); + residual_names.push_back("navscisci2"); + + if (FLAGS_fix_map) { + problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); + problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); + } + + // The nav cam cost function + ceres::CostFunction* nav_cost_function = + 
dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); + ceres::LossFunction* nav_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + problem.AddResidualBlock(nav_cost_function, nav_loss_function, + &nav_cam_vec[(nav_cam_start + nav_it) * NUM_RIGID_PARAMS], + &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); + residual_names.push_back("navscinav1"); + residual_names.push_back("navscinav2"); + + if (FLAGS_fix_map) + problem.SetParameterBlockConstant(&nav_cam_vec[(nav_cam_start + nav_it) + * NUM_RIGID_PARAMS]); + + if (have_mesh_intersection) { + // Constrain the sci cam texture to agree with the nav cam texture on the mesh + + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(initial_nav_sci_xyz[nav_sci_xyz_count], + mesh_block_sizes, FLAGS_mesh_weight); + + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_mesh_robust_threshold); + + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); + + residual_names.push_back("mesh_x"); + residual_names.push_back("mesh_y"); + residual_names.push_back("mesh_z"); + } + + // Very important, go forward in the vector of xyz. Do it at the end. 
+ nav_sci_xyz_count++; + } + + return; + } + + void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. + cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; + } + + void select_images_to_match(// Inputs // NOLINT + double haz_cam_start_time, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + double scicam_to_hazcam_timestamp_offset, // NOLINT + std::vector const& sparse_map_timestamps, // NOLINT + std::vector const& all_haz_cam_inten_timestamps, // NOLINT + std::vector const& all_sci_cam_timestamps, // NOLINT + std::set const& sci_cam_timestamps_to_use, // NOLINT + dense_map::RosBagHandle const& nav_cam_handle, // NOLINT + dense_map::RosBagHandle const& sci_cam_handle, // NOLINT + dense_map::RosBagHandle const& haz_cam_points_handle, // NOLINT + dense_map::RosBagHandle const& haz_cam_intensity_handle, // NOLINT + camera::CameraParameters const& sci_cam_params, // NOLINT + // Outputs // NOLINT + int& nav_cam_start, // NOLINT + std::vector& cid_to_image_type, // NOLINT + std::vector& 
haz_cam_intensity_timestamps, // NOLINT + std::vector& sci_cam_timestamps, // NOLINT + std::vector& images, // NOLINT + std::vector& depth_clouds) { // NOLINT + // Wipe the outputs + nav_cam_start = -1; + cid_to_image_type.clear(); + haz_cam_intensity_timestamps.clear(); + sci_cam_timestamps.clear(); + images.clear(); + depth_clouds.clear(); + + bool stop_early = false; + double found_time = -1.0; + bool save_grayscale = true; // feature detection needs grayscale + + double navcam_to_scicam_timestamp_offset + = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; + + // Use these to keep track where in the bags we are. After one + // traversal forward in time they need to be reset. + int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0; + + for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) { + if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { + // The case when we would like to start later. Note the second + // comparison after "&&". When FLAG_start is 0, we want to + // make sure if the first nav image from the bag is in the map + // we use it, so we don't skip it even if based on + // navcam_to_hazcam_timestamp_offset we should. 
+ if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset + < FLAGS_start + haz_cam_start_time && + sparse_map_timestamps[map_it] < FLAGS_start + haz_cam_start_time) + continue; + } + + if (nav_cam_start < 0) nav_cam_start = map_it; + + images.push_back(cv::Mat()); + if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs, + save_grayscale, images.back(), + nav_cam_pos, found_time)) { + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n"; + } + cid_to_image_type.push_back(dense_map::NAV_CAM); + + if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { + // If we would like to end earlier, then save the last nav cam image so far + // and quit + if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset > + FLAGS_start + FLAGS_duration + haz_cam_start_time) { + stop_early = true; + break; + } + } + + // Do not look up sci cam and haz cam images in time intervals bigger than this + if (std::abs(sparse_map_timestamps[map_it + 1] - sparse_map_timestamps[map_it]) > FLAGS_bracket_len) continue; + + // Append at most two haz cam images between consecutive sparse + // map timestamps, close to these sparse map timestamps. 
+ std::vector local_haz_timestamps; + dense_map::pickTimestampsInBounds(all_haz_cam_inten_timestamps, + sparse_map_timestamps[map_it], + sparse_map_timestamps[map_it + 1], + -navcam_to_hazcam_timestamp_offset, + local_haz_timestamps); + + for (size_t samp_it = 0; samp_it < local_haz_timestamps.size(); samp_it++) { + haz_cam_intensity_timestamps.push_back(local_haz_timestamps[samp_it]); + + double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset + - haz_cam_start_time; + double haz_time = local_haz_timestamps[samp_it] - haz_cam_start_time; + double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset + - haz_cam_start_time; + + std::cout << "nav_start haz nav_end times " + << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl; + std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; + + // Read the image + images.push_back(cv::Mat()); + if (!dense_map::lookupImage(haz_cam_intensity_timestamps.back(), + haz_cam_intensity_handle.bag_msgs, + save_grayscale, images.back(), haz_cam_intensity_pos, + found_time)) + LOG(FATAL) << "Cannot look up haz cam image at given time"; + cid_to_image_type.push_back(dense_map::HAZ_CAM); + + double cloud_time = -1.0; + depth_clouds.push_back(cv::Mat()); + if (!dense_map::lookupCloud(haz_cam_intensity_timestamps.back(), + haz_cam_points_handle.bag_msgs, + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, + depth_clouds.back(), + haz_cam_cloud_pos, cloud_time)) { + // This need not succeed always + } + } + + // Append at most two sci cam images between consecutive sparse + // map timestamps, close to these sparse map timestamps. 
+ + std::vector local_sci_timestamps; + dense_map::pickTimestampsInBounds(all_sci_cam_timestamps, sparse_map_timestamps[map_it], + sparse_map_timestamps[map_it + 1], + -navcam_to_scicam_timestamp_offset, + local_sci_timestamps); + + // Append to the vector of sampled timestamps + for (size_t samp_it = 0; samp_it < local_sci_timestamps.size(); samp_it++) { + // See if to use only specified timestamps + if (!sci_cam_timestamps_to_use.empty() && + sci_cam_timestamps_to_use.find(local_sci_timestamps[samp_it]) == + sci_cam_timestamps_to_use.end()) + continue; + + sci_cam_timestamps.push_back(local_sci_timestamps[samp_it]); + + double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset + - haz_cam_start_time; + double sci_time = local_sci_timestamps[samp_it] + scicam_to_hazcam_timestamp_offset + - haz_cam_start_time; + double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset + - haz_cam_start_time; + std::cout << "nav_start sci nav_end times " + << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; + std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; + + // Read the sci cam image, and perhaps adjust its size + images.push_back(cv::Mat()); + cv::Mat local_img; + if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs, + save_grayscale, local_img, + sci_cam_pos, found_time)) + LOG(FATAL) << "Cannot look up sci cam image at given time."; + adjustImageSize(sci_cam_params, local_img); + local_img.copyTo(images.back()); + + // Sanity check + Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize(); + if (images.back().cols != sci_cam_size[0] || images.back().rows != sci_cam_size[1]) + LOG(FATAL) << "Sci cam images have the wrong size."; + + cid_to_image_type.push_back(dense_map::SCI_CAM); + } + } // End iterating over nav cam timestamps + + // Add the last nav cam image from the map, unless we stopped early and this was done + if (!stop_early) { + 
images.push_back(cv::Mat()); + if (!dense_map::lookupImage(sparse_map_timestamps.back(), nav_cam_handle.bag_msgs, + save_grayscale, images.back(), + nav_cam_pos, found_time)) + LOG(FATAL) << "Cannot look up nav cam image at given time."; + cid_to_image_type.push_back(dense_map::NAV_CAM); + } + + if (images.size() > sparse_map_timestamps.size() + haz_cam_intensity_timestamps.size() + + sci_cam_timestamps.size()) + LOG(FATAL) << "Book-keeping error in select_images_to_match."; + + return; + } + + void set_up_block_sizes(int num_scicam_focal_lengths, int num_scicam_distortions, + std::vector & depth_to_haz_block_sizes, + std::vector & nav_block_sizes, + std::vector & depth_to_nav_block_sizes, + std::vector & depth_to_sci_block_sizes, + std::vector & sci_block_sizes, + std::vector & mesh_block_sizes) { + // Wipe the outputs + depth_to_haz_block_sizes.clear(); + nav_block_sizes.clear(); + depth_to_nav_block_sizes.clear(); + depth_to_sci_block_sizes.clear(); + sci_block_sizes.clear(); + mesh_block_sizes.clear(); + + // Set up the variable blocks to optimize for DepthToHazError + depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for NavError + nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + nav_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + // Set up the variable blocks to optimize for DepthToNavError + depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for DepthToSciError + depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + 
depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length + depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center + depth_to_sci_block_sizes.push_back(num_scicam_distortions); // distortion + + // Set up the variable blocks to optimize for SciError + sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + sci_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length + sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center + sci_block_sizes.push_back(num_scicam_distortions); // distortion + + // Set up the variable blocks to optimize for the mesh xyz error + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + } + + typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + + // Wrapper to find the value of a const map at a given key + int mapVal(std::map const& map, int key) { + auto ptr = map.find(key); + if (ptr == map.end()) + LOG(FATAL) << "Cannot find map value for given index."; + return ptr->second; + } + + // Find nav images close in time. 
Match sci and haz images in between to each + // other and to these nave images + // TODO(oalexan1): Reword the above explanation + void detect_match_features(// Inputs // NOLINT + std::vector const& images, // NOLINT + std::vector const& cid_to_image_type, // NOLINT + // Outputs // NOLINT + std::map & image_to_nav_it, // NOLINT + std::map & image_to_haz_it, // NOLINT + std::map & image_to_sci_it, // NOLINT + std::map & haz_cam_to_left_nav_cam_index, // NOLINT + std::map & haz_cam_to_right_nav_cam_index, // NOLINT + std::map & sci_cam_to_left_nav_cam_index, // NOLINT + std::map & sci_cam_to_right_nav_cam_index, // NOLINT + MATCH_MAP & matches) { // NOLINT + // Wipe the outputs + image_to_nav_it.clear(); + image_to_haz_it.clear(); + image_to_sci_it.clear(); + haz_cam_to_left_nav_cam_index.clear(); + haz_cam_to_right_nav_cam_index.clear(); + sci_cam_to_left_nav_cam_index.clear(); + sci_cam_to_right_nav_cam_index.clear(); + matches.clear(); + + int nav_it = 0, haz_it = 0, sci_it = 0; + for (int image_it = 0; image_it < static_cast(images.size()); image_it++) { + if (cid_to_image_type[image_it] == dense_map::NAV_CAM) { + image_to_nav_it[image_it] = nav_it; + nav_it++; + } else if (cid_to_image_type[image_it] == dense_map::HAZ_CAM) { + image_to_haz_it[image_it] = haz_it; + haz_it++; + } else if (cid_to_image_type[image_it] == dense_map::SCI_CAM) { + image_to_sci_it[image_it] = sci_it; + sci_it++; + } + } + + std::vector > image_pairs; + + // Look at two neighboring nav images. Find left_img_it and + // right_img_it so cid_to_image_type for these are nav images. 
+ for (int left_img_it = 0; left_img_it < static_cast(images.size()); left_img_it++) { + if (cid_to_image_type[left_img_it] != dense_map::NAV_CAM) continue; // Not nav cam + + // now find right_img_it + int right_img_it = -1; + for (int local_it = left_img_it + 1; local_it < static_cast(images.size()); local_it++) { + if (cid_to_image_type[local_it] == dense_map::NAV_CAM) { + right_img_it = local_it; + break; + } + } + + if (right_img_it < 0) continue; + + // Now look at sci and haz images in between + std::vector nav_cam_indices, haz_cam_indices, sci_cam_indices; + + nav_cam_indices.push_back(left_img_it); + nav_cam_indices.push_back(right_img_it); + + for (int local_img_it = left_img_it + 1; local_img_it < right_img_it; local_img_it++) { + int left_nav_it = mapVal(image_to_nav_it, left_img_it); + int right_nav_it = mapVal(image_to_nav_it, right_img_it); + + if (cid_to_image_type[local_img_it] == dense_map::HAZ_CAM) { + int haz_it = mapVal(image_to_haz_it, local_img_it); + haz_cam_indices.push_back(local_img_it); + haz_cam_to_left_nav_cam_index[haz_it] = left_nav_it; + haz_cam_to_right_nav_cam_index[haz_it] = right_nav_it; + + } else if (cid_to_image_type[local_img_it] == dense_map::SCI_CAM) { + int sci_it = mapVal(image_to_sci_it, local_img_it); + sci_cam_indices.push_back(local_img_it); + sci_cam_to_left_nav_cam_index[image_to_sci_it[local_img_it]] = left_nav_it; + sci_cam_to_right_nav_cam_index[image_to_sci_it[local_img_it]] = right_nav_it; + } + } + + // Match haz to nav + for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { + for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { + image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], nav_cam_indices[nav_it])); + } + } + + // Match haz to sci + for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { + for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { + image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], sci_cam_indices[sci_it])); 
+ } + } + + // Match sci to nav + for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { + for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { + image_pairs.push_back(std::make_pair(sci_cam_indices[sci_it], nav_cam_indices[nav_it])); + } + } + } + + // Detect features using multiple threads + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(images.size()); + cid_to_keypoint_map.resize(images.size()); + ff_common::ThreadPool thread_pool1; + for (size_t it = 0; it < images.size(); it++) + thread_pool1.AddTask(&dense_map::detectFeatures, images[it], FLAGS_verbose, + &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + thread_pool1.Join(); + + // Create the matches among nav, haz, and sci images. Note that we + // have a starting and ending nav cam images, and match all the + // haz and sci images to these two nav cam images and to each + // other. + + // Find the matches using multiple threads + ff_common::ThreadPool thread_pool2; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, left_image_it, right_image_it, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it], + FLAGS_verbose, &matches[pair]); + } + thread_pool2.Join(); + + return; + } +} // namespace dense_map + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + std::cout.precision(17); // to be able to print timestamps + + if (FLAGS_ros_bag.empty()) + LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_sparse_map.empty()) + LOG(FATAL) << "The input sparse map was not specified."; + + if (FLAGS_output_map.empty()) + LOG(FATAL) << "The output 
sparse map was not specified."; + + if (!FLAGS_skip_registration) { + if (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty()) + LOG(FATAL) << "Either the hugin or xyz file was not specified."; + } + + if (FLAGS_opt_map_only && FLAGS_fix_map) + LOG(FATAL) << "Cannot both float the sparse map and keep it fixed."; + + if (FLAGS_robust_threshold <= 0.0) + LOG(FATAL) << "The robust threshold must be positive.\n"; + + // Set up handles for reading data at given time stamp without + // searching through the whole bag each time. + dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); + dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); + dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); + dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); + + if (nav_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No nav cam images found."; + if (sci_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No sci cam images found."; + if (haz_cam_intensity_handle.bag_msgs.empty()) LOG(FATAL) << "No haz cam images found."; + + // Read the config file + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d navcam_to_body_trans; + Eigen::Affine3d hazcam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans, + hazcam_depth_to_image_transform); + + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); it++) { + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset 
with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; + } + } + } + + // Copy to structures expected by this tool + camera::CameraParameters nav_cam_params = cam_params[0]; + camera::CameraParameters haz_cam_params = cam_params[1]; + camera::CameraParameters sci_cam_params = cam_params[2]; + Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + Eigen::Affine3d scicam_to_hazcam_aff_trans = + ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse(); + double navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + double scicam_to_hazcam_timestamp_offset = + ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2]; + + if (FLAGS_mesh_weight <= 0.0 || FLAGS_mesh_robust_threshold <= 0.0) + LOG(FATAL) << "The mesh weight and robust threshold must be positive.\n"; + + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + +#if 0 + std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; + std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; + std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() + << "\n"; +#endif + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + + // Read the sparse map + boost::shared_ptr sparse_map = + boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + + // TODO(oalexan1): All this timestamp reading logic below should be in a function + + // Find the minimum and maximum timestamps in the sparse map + double min_map_timestamp = std::numeric_limits::max(); + double max_map_timestamp = -min_map_timestamp; + std::vector sparse_map_timestamps; + const std::vector& sparse_map_images 
= sparse_map->cid_to_filename_; + sparse_map_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + sparse_map_timestamps[cid] = timestamp; + min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); + max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]); + } + if (sparse_map_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; + + // Read the haz cam timestamps + std::vector const& haz_cam_intensity_msgs + = haz_cam_intensity_handle.bag_msgs; + if (haz_cam_intensity_msgs.empty()) LOG(FATAL) << "No haz cam messages are present."; + double haz_cam_start_time = -1.0; + std::vector all_haz_cam_inten_timestamps; + for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { + sensor_msgs::Image::ConstPtr image_msg + = haz_cam_intensity_msgs[it].instantiate(); + if (image_msg) { + double haz_cam_time = image_msg->header.stamp.toSec(); + all_haz_cam_inten_timestamps.push_back(haz_cam_time); + if (haz_cam_start_time < 0) haz_cam_start_time = haz_cam_time; + } + } + + // Read the sci cam timestamps from the bag + std::vector const& sci_cam_msgs = sci_cam_handle.bag_msgs; + std::vector all_sci_cam_timestamps; + for (size_t sci_it = 0; sci_it < sci_cam_msgs.size(); sci_it++) { + sensor_msgs::Image::ConstPtr sci_image_msg + = sci_cam_msgs[sci_it].instantiate(); + sensor_msgs::CompressedImage::ConstPtr comp_sci_image_msg = + sci_cam_msgs[sci_it].instantiate(); + if (sci_image_msg) + all_sci_cam_timestamps.push_back(sci_image_msg->header.stamp.toSec()); + else if (comp_sci_image_msg) + all_sci_cam_timestamps.push_back(comp_sci_image_msg->header.stamp.toSec()); + } + + // If desired to process only specific timestamps + std::set sci_cam_timestamps_to_use; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) 
sci_cam_timestamps_to_use.insert(val); + } + + // Will optimize the nav cam poses as part of the process + std::vector& nav_cam_affines = sparse_map->cid_to_cam_t_global_; // alias + + if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { + double latest_start + = sparse_map_timestamps.back() + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; + if (latest_start < FLAGS_start) { + std::cout << "The sparse map ended before the desired start time. " + << "Use a start time no later than " << latest_start << "." << std::endl; + return 1; + } + + double earliest_end = sparse_map_timestamps[0] + navcam_to_hazcam_timestamp_offset + - haz_cam_start_time; + if (earliest_end > FLAGS_start + FLAGS_duration) { + std::cout << "The sparse map did not start yet. The sum of start and duration " + << "must be at least " << earliest_end << "." << std::endl; + return 1; + } + } + + int nav_cam_start = -1; // Record the first nav cam image used + std::vector cid_to_image_type; + std::vector haz_cam_intensity_timestamps, sci_cam_timestamps; // timestamps we will use + std::vector images; + std::vector depth_clouds; + // Find nav images close in time. Match sci and haz images in between to each + // other and to these nave images + // TODO(oalexan1): This selects haz cam images outside of bracket. + // TODO(oalexan1): No check for bracket size either. 
+ select_images_to_match( // Inputs // NOLINT + haz_cam_start_time, navcam_to_hazcam_timestamp_offset, // NOLINT + scicam_to_hazcam_timestamp_offset, // NOLINT + sparse_map_timestamps, all_haz_cam_inten_timestamps, // NOLINT + all_sci_cam_timestamps, sci_cam_timestamps_to_use, // NOLINT + nav_cam_handle, sci_cam_handle, haz_cam_points_handle, // NOLINT + haz_cam_intensity_handle, // NOLINT + sci_cam_params, // NOLINT + // Outputs // NOLINT + nav_cam_start, cid_to_image_type, // NOLINT + haz_cam_intensity_timestamps, sci_cam_timestamps, // NOLINT + images, depth_clouds); // NOLINT + + // If an image in the vector of images is of nav_cam type, see where its index is + // in the list of nav cam images + std::map image_to_nav_it, image_to_haz_it, image_to_sci_it; + + // Given the index of a haz cam or sci cam image, find the index of the left and right + // nav cam images bounding it in time. + std::map haz_cam_to_left_nav_cam_index, haz_cam_to_right_nav_cam_index; + std::map sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index; + + // Map haz cam and nav cam index to the vectors of haz cam to nav cam matches + dense_map::MATCH_MAP matches; + + // TODO(oalexan1): Add explanation here + detect_match_features(// Inputs // NOLINT + images, cid_to_image_type, // NOLINT + // Outputs // NOLINT + image_to_nav_it, image_to_haz_it, // NOLINT + image_to_sci_it, // NOLINT + haz_cam_to_left_nav_cam_index, // NOLINT + haz_cam_to_right_nav_cam_index, // NOLINT + sci_cam_to_left_nav_cam_index, // NOLINT + sci_cam_to_right_nav_cam_index, // NOLINT + matches); // NOLINT + + double hazcam_depth_to_image_scale = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + + // Since we will keep the scale fixed, vary the part of the transform without + // the scale, while adding the scale each time before the transform is applied + Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; + hazcam_depth_to_image_noscale.linear() /= 
hazcam_depth_to_image_scale; + + // Form the optimization vector for hazcam_depth_to_image_transform + std::vector hazcam_depth_to_image_vec(dense_map::NUM_RIGID_PARAMS); + dense_map::rigid_transform_to_array(hazcam_depth_to_image_noscale, &hazcam_depth_to_image_vec[0]); + + // Go back from the array to the original transform. This will make it into a pure + // scale*rotation + translation, removing the artifacts of it having been read + // from disk where it was stored with just a few digits of precision + dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); + hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + + // Form the optimization vector for hazcam_to_navcam_aff_trans + std::vector hazcam_to_navcam_vec(dense_map::NUM_RIGID_PARAMS); + dense_map::rigid_transform_to_array(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); + + // Recreate hazcam_to_navcam_aff_trans as above + dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); + + // Form the optimization vector for scicam_to_hazcam_aff_trans + std::vector scicam_to_hazcam_vec(dense_map::NUM_RIGID_PARAMS); + dense_map::rigid_transform_to_array(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); + + // Recreate scicam_to_hazcam_aff_trans as above + dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); + + // See how many nav to sci matches we have and allocate space for their xyz + // TODO(oalexan1): This must be factored out as a function + int num_nav_sci_xyz = 0; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; + + std::map::iterator image_haz_it, image_nav_it, image_sci_it; + + image_sci_it = image_to_sci_it.find(index_pair.first); + image_nav_it = image_to_nav_it.find(index_pair.second); + if (image_sci_it != image_to_sci_it.end() && image_nav_it != 
image_to_nav_it.end()) { + int sci_it = image_sci_it->second; + int nav_it = image_nav_it->second; + + // Add to the number of xyz + num_nav_sci_xyz += match_pair.first.size(); + + if (FLAGS_verbose) saveImagesAndMatches("sci", "nav", index_pair, match_pair, images); + } + + image_haz_it = image_to_haz_it.find(index_pair.first); + image_sci_it = image_to_sci_it.find(index_pair.second); + if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { + if (FLAGS_verbose) saveImagesAndMatches("haz", "sci", index_pair, match_pair, images); + } + + image_haz_it = image_to_haz_it.find(index_pair.first); + image_nav_it = image_to_nav_it.find(index_pair.second); + if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { + if (FLAGS_verbose) saveImagesAndMatches("haz", "nav", index_pair, match_pair, images); + } + } + + // Prepare for floating sci cam params + int num_scicam_focal_lengths = 1; // Same focal length in x and y + std::set sci_cam_intrinsics_to_float; + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, sci_cam_intrinsics_to_float); + Eigen::Vector2d sci_cam_focal_vector = sci_cam_params.GetFocalVector(); + if (num_scicam_focal_lengths == 1) { + sci_cam_focal_vector[0] = sci_cam_params.GetFocalLength(); // average the two focal lengths + sci_cam_focal_vector[1] = sci_cam_focal_vector[0]; + } + std::cout << std::endl; + std::cout << "Initial focal length for sci cam: " << sci_cam_focal_vector.transpose() << std::endl; + Eigen::Vector2d sci_cam_optical_center = sci_cam_params.GetOpticalOffset(); + std::cout << "Initial optical center for sci_cam: " << sci_cam_optical_center.transpose() << std::endl; + Eigen::VectorXd sci_cam_distortion = sci_cam_params.GetDistortion(); + std::cout << "Initial distortion for sci_cam: " << sci_cam_distortion.transpose() << std::endl; + + std::vector depth_to_haz_block_sizes, nav_block_sizes; + std::vector depth_to_nav_block_sizes, depth_to_sci_block_sizes; + 
std::vector sci_block_sizes, mesh_block_sizes; + dense_map::set_up_block_sizes(// Inputs // NOLINT + num_scicam_focal_lengths, sci_cam_distortion.size(), // NOLINT + // Outputs // NOLINT + depth_to_haz_block_sizes, nav_block_sizes, // NOLINT + depth_to_nav_block_sizes, depth_to_sci_block_sizes, // NOLINT + sci_block_sizes, mesh_block_sizes); // NOLINT + + // Storage for the residual names and xyz + std::vector residual_names; + std::vector nav_sci_xyz(dense_map::NUM_XYZ_PARAMS * num_nav_sci_xyz, 0); + std::vector initial_nav_sci_xyz(num_nav_sci_xyz, Eigen::Vector3d(0, 0, 0)); + + // Put the sparse map camera transforms in a vector so we can optimize them further + int num_cams = nav_cam_affines.size(); + std::vector nav_cam_vec(num_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_cams; cid++) + dense_map::rigid_transform_to_array(nav_cam_affines[cid], + &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + int nav_sci_xyz_count = 0; + + ceres::Problem problem; + + // Form the problem + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + + std::map::iterator image_haz_it, image_nav_it, image_sci_it; + + // Doing haz cam to nav cam matches + image_haz_it = image_to_haz_it.find(index_pair.first); + image_nav_it = image_to_nav_it.find(index_pair.second); + if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { + int haz_it = image_haz_it->second; + int nav_it = image_nav_it->second; + dense_map::add_haz_nav_cost(// Inputs // NOLINT + haz_it, nav_it, nav_cam_start, // NOLINT + navcam_to_hazcam_timestamp_offset, // NOLINT + match_pair, haz_cam_intensity_timestamps, // NOLINT + sparse_map_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT + haz_cam_to_right_nav_cam_index, nav_cam_params, // NOLINT + haz_cam_params, depth_to_nav_block_sizes, // NOLINT + depth_to_haz_block_sizes, // NOLINT + 
nav_cam_affines, depth_clouds, // NOLINT + // Outputs // NOLINT + residual_names, hazcam_depth_to_image_scale, // NOLINT + nav_cam_vec, hazcam_to_navcam_vec, // NOLINT + hazcam_depth_to_image_vec, // NOLINT + problem); // NOLINT + } + + // Doing haz cam to sci cam matches + image_haz_it = image_to_haz_it.find(index_pair.first); + image_sci_it = image_to_sci_it.find(index_pair.second); + if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { + int haz_it = image_haz_it->second; + int sci_it = image_sci_it->second; + add_haz_sci_cost( // Inputs // NOLINT + haz_it, sci_it, nav_cam_start, navcam_to_hazcam_timestamp_offset, // NOLINT + scicam_to_hazcam_timestamp_offset, match_pair, // NOLINT + haz_cam_intensity_timestamps, sparse_map_timestamps, // NOLINT + sci_cam_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT + haz_cam_to_right_nav_cam_index, sci_cam_to_left_nav_cam_index, // NOLINT + sci_cam_to_right_nav_cam_index, sci_cam_params, // NOLINT + haz_cam_params, depth_to_sci_block_sizes, // NOLINT + depth_to_haz_block_sizes, nav_cam_affines, // NOLINT + depth_clouds, // NOLINT + // Outputs // NOLINT + residual_names, hazcam_depth_to_image_scale, nav_cam_vec, // NOLINT + hazcam_to_navcam_vec, scicam_to_hazcam_vec, // NOLINT + hazcam_depth_to_image_vec, sci_cam_focal_vector, // NOLINT + sci_cam_optical_center, sci_cam_distortion, problem); // NOLINT + } + + // Doing sci cam to nav cam matches + image_nav_it = image_to_nav_it.find(index_pair.second); + image_sci_it = image_to_sci_it.find(index_pair.first); + if (image_nav_it != image_to_nav_it.end() && image_sci_it != image_to_sci_it.end()) { + int nav_it = image_nav_it->second; + int sci_it = image_sci_it->second; + + add_nav_sci_cost(// Inputs // NOLINT + nav_it, sci_it, nav_cam_start, // NOLINT + navcam_to_hazcam_timestamp_offset, // NOLINT + scicam_to_hazcam_timestamp_offset, // NOLINT + match_pair, // NOLINT + sparse_map_timestamps, sci_cam_timestamps, // NOLINT + 
sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index, // NOLINT + hazcam_to_navcam_aff_trans, scicam_to_hazcam_aff_trans, // NOLINT + nav_cam_params, sci_cam_params, // NOLINT + nav_block_sizes, sci_block_sizes, mesh_block_sizes, // NOLINT + nav_cam_affines, depth_clouds, mesh, // NOLINT + bvh_tree, // NOLINT + // Outputs // NOLINT + nav_sci_xyz_count, residual_names, // NOLINT + nav_cam_vec, hazcam_to_navcam_vec, // NOLINT + scicam_to_hazcam_vec, sci_cam_focal_vector, // NOLINT + sci_cam_optical_center, sci_cam_distortion, // NOLINT + initial_nav_sci_xyz, nav_sci_xyz, // NOLINT + problem); // NOLINT + } + } // end iterating over matches + + if (nav_sci_xyz_count != num_nav_sci_xyz) LOG(FATAL) << "nav sci xyz book-keeping error."; + + if (sparse_map->pid_to_cid_fid_.size() != sparse_map->pid_to_xyz_.size()) + LOG(FATAL) << "Book-keeping error 1 in the sparse map."; + + for (size_t pid = 0; pid < sparse_map->pid_to_cid_fid_.size(); pid++) { + // Note that xyz is an alias. This is very important as we optimize these + // xyz in-place. 
+ Eigen::Vector3d& xyz = sparse_map->pid_to_xyz_[pid]; + + for (auto cid_fid = sparse_map->pid_to_cid_fid_[pid].begin(); + cid_fid != sparse_map->pid_to_cid_fid_[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Matrix2Xd& keypoints = sparse_map->cid_to_keypoint_map_[cid]; // alias + + if (fid > keypoints.cols()) LOG(FATAL) << "Book-keeping error 2 in the sparse map."; + + Eigen::Vector2d undist_nav_ip = keypoints.col(fid); + + ceres::CostFunction* nav_cost_function = + dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); + ceres::LossFunction* nav_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + problem.AddResidualBlock(nav_cost_function, nav_loss_function, + &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid], + &xyz[0]); + residual_names.push_back("nav1"); + residual_names.push_back("nav2"); + + if (FLAGS_fix_map) + problem.SetParameterBlockConstant(&nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + } + } + + if (FLAGS_opt_map_only) { + problem.SetParameterBlockConstant(&hazcam_depth_to_image_vec[0]); + problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); + problem.SetParameterBlockConstant(&hazcam_to_navcam_vec[0]); + problem.SetParameterBlockConstant(&scicam_to_hazcam_vec[0]); + } + + // Evaluate the residual before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "Book-keeping error in the number of residuals."; + + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Want the RMSE residual with loss, to understand what all residuals 
contribute, + // but without getting distracted by the outliers + eval_options.apply_loss_function = false; + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + dense_map::calc_median_residuals(residuals, residual_names, "before opt"); + + // Experimentally it was found that it was better to keep the scale + // constant and optimize everything else. Later registration will + // happen to correct any drift in the nav cam poses. + if (!FLAGS_float_scale) problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); + + // See which sci cam intrinsics values to float or keep fixed + if (sci_cam_intrinsics_to_float.find("focal_length") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating focal_length." << std::endl; + problem.SetParameterBlockConstant(&sci_cam_focal_vector[0]); + } else { + // std::cout << "For " << sci_cam << " floating focal_length." << std::endl; + } + if (sci_cam_intrinsics_to_float.find("optical_center") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating optical_center." << std::endl; + problem.SetParameterBlockConstant(&sci_cam_optical_center[0]); + } else { + // std::cout << "For " << sci_cam << " floating optical_center." << std::endl; + } + if (sci_cam_intrinsics_to_float.find("distortion") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating distortion." << std::endl; + problem.SetParameterBlockConstant(&sci_cam_distortion[0]); + } else { + // std::cout << "For " << sci_cam << " floating distortion." 
<< std::endl; + } + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // Copy back the optimized intrinsics + sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], + sci_cam_focal_vector[0])); + sci_cam_params.SetOpticalOffset(sci_cam_optical_center); + sci_cam_params.SetDistortion(sci_cam_distortion); + + // Update with the optimized results + dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); + hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); + dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); + for (int cid = 0; cid < num_cams; cid++) + dense_map::array_to_rigid_transform(nav_cam_affines[cid], &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Compute the residuals without loss, want to see the outliers too + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "Book-keeping error in the number of residuals."; + + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Want the RMSE residual with loss, to understand what all residuals contribute, + // but without getting distracted by the outliers + eval_options.apply_loss_function = false; + problem.Evaluate(eval_options, 
&total_cost, &residuals, NULL, NULL); + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + + // Re-register the map, and keep track of what scale was used. + // Note that we do not update the positions of the sci and haz + // images. We don't need those any more. + if (!FLAGS_skip_registration) { + std::vector data_files; + data_files.push_back(FLAGS_hugin_file); + data_files.push_back(FLAGS_xyz_file); + bool verification = false; + double map_scale + = sparse_mapping::RegistrationOrVerification(data_files, verification, sparse_map.get()); + + // Apply the scale + hazcam_depth_to_image_scale *= map_scale; + // This transform is affine, so both the linear and translation parts need a scale + hazcam_depth_to_image_transform.linear() *= map_scale; + hazcam_depth_to_image_transform.translation() *= map_scale; + // These two are rotation + translation, so only the translation needs the scale + hazcam_to_navcam_aff_trans.translation() *= map_scale; + scicam_to_hazcam_aff_trans.translation() *= map_scale; + } + + std::cout << "Writing: " << FLAGS_output_map << std::endl; + sparse_map->Save(FLAGS_output_map); + + if (!FLAGS_opt_map_only) { + // Save back the results + // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + // nav_to_haz + ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse(); + // nav_to_sci + ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() * + hazcam_to_navcam_aff_trans.inverse(); + ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset; + ref_to_cam_timestamp_offsets[2] = + navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; + dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform", + cam_params, ref_to_cam_trans, + ref_to_cam_timestamp_offsets, + hazcam_depth_to_image_transform); + } + + return 0; +} // NOLINT + diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 7614f4d8..99e0b467 
100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -355,13 +355,17 @@ double median_mesh_edge(cv::Mat const& depthMat) { // Add points on edges and inside triangles having distances longer // than the median edge length times a factor. Store the combined set // in depthMat by adding further rows to it. -void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { +void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, double voxel_size, Vec5d const& Zero) { double median_len = median_mesh_edge(depthMat); if (median_len <= 0) return; // should not happen // Add points if lengths are more than this - double min_len = 1.3 * median_len; + double min_len = std::min(1.3 * median_len, 0.9 * voxel_size); + + // There is no point in this being way smaller than the voxels these + // points will end up being binned into. + min_len = std::max(min_len, 0.4 * voxel_size); std::vector points; @@ -788,8 +792,20 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa } // Add weights which are higher at image center and decrease towards -// boundary. This makes voxblox blend better. -void calc_weights(cv::Mat& depthMat, double exponent, bool simulated_data) { +// boundary. This makes voxblox blend better. Need a lot of care, as +// voxblox does not like weights under 1e-6 or so, neither ones above +// 1e+6, since it treats them as floats. +void calc_weights(cv::Mat& depthMat, double reliability_weight_exponent, + double max_ray_length, bool simulated_data) { + // The weights defined below decay too fast, causing problems for voxblox. + // Hence multiply them all by something to normalize them somewhat. This + // factor should not be too big either, per the earlier note. 
+ double factor = 1.0; + if (!simulated_data) { + factor = Eigen::Vector2d(depthMat.rows / 2.0, depthMat.cols / 2.0).norm(); + factor = pow(factor, reliability_weight_exponent/2.0); + } + for (int row = 0; row < depthMat.rows; row++) { #pragma omp parallel for for (int col = 0; col < depthMat.cols; col++) { @@ -803,11 +819,14 @@ void calc_weights(cv::Mat& depthMat, double exponent, bool simulated_data) { float weight = 1.0; if (!simulated_data) { // Some kind of function decaying from center - weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); + weight = factor / pow(1.0 + dist_sq, reliability_weight_exponent/2.0); // Also, points that are further in z are given less weight double z = depthMat.at(row, col)[2]; - weight *= 1.0 / (0.001 + z * z); + + // Likely max_ray_length is between 1 and 10, and then this factor + // is between 0.1 and 10. + weight *= max_ray_length / (0.001 + z * z); // Make sure the weight does not get too small as voxblox // will read it as a float. Here making the weights @@ -826,6 +845,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, bool simulated_data, int depth_exclude_columns, int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, double reliability_weight_exponent, + double max_ray_length, double voxel_size, std::vector const& median_filter_params, bool save_debug_data, const char* filename_buffer, Eigen::MatrixXd const& desired_cam_to_world_trans) { @@ -861,11 +881,16 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, double y = pc.points[count].y; double z = pc.points[count].z; - // Pick first channel. The precise color of the depth cloud is not important. - // It will be wiped later during smoothing and hole-filling anyway. + // Pick the first image channel. The precise color of the depth + // cloud is not important. It will be wiped later during + // smoothing and hole-filling anyway. 
double i = haz_cam_intensity.at(row, col)[0]; - bool skip = (col < depth_exclude_columns || depthMat.cols - col < depth_exclude_columns || - row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); + + double ray_len = Eigen::Vector3d(x, y, z).norm(); + // Exclude points at the boundary or points further than max_ray_length. + bool skip = (col < depth_exclude_columns || depthMat.cols - 1 - col < depth_exclude_columns || + row < depth_exclude_rows || depthMat.rows - 1 - row < depth_exclude_rows || + ray_len > max_ray_length || std::isnan(ray_len) || std::isinf(ray_len)); if ((x == 0 && y == 0 && z == 0) || skip) depthMat.at(row, col) = Zero; @@ -904,12 +929,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, // This is the right place at which to add weights, before adding // extra points messes up with the number of rows in in depthMat. - calc_weights(depthMat, reliability_weight_exponent, simulated_data); + calc_weights(depthMat, reliability_weight_exponent, max_ray_length, simulated_data); // Add extra points, but only if we are committed to manipulating the // depth cloud to start with if (depth_hole_fill_diameter > 0 && !simulated_data) - add_extra_pts(depthMat, workMat, Zero); + add_extra_pts(depthMat, workMat, voxel_size, Zero); if (save_debug_data) { // Save the updated depthMat as a mesh (for debugging) @@ -1052,7 +1077,8 @@ void saveCameraPoses( Eigen::Affine3d const& nav_cam_to_desired_cam_trans, Eigen::Affine3d& haz_cam_depth_to_image_transform, int depth_exclude_columns, int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, std::vector const& median_filter_params, + double reliability_weight_exponent, double max_ray_length, double voxel_size, + std::vector const& median_filter_params, std::vector const& desired_cam_msgs, std::vector const& nav_cam_msgs, // always needed when not doing sim std::vector const& haz_cam_points_msgs, @@ -1060,6 +1086,7 
@@ void saveCameraPoses( std::map> const& sci_cam_exif, std::string const& output_dir, std::string const& desired_cam_dir, double start, double duration, double sampling_spacing_seconds, double dist_between_processed_cams, + double angle_between_processed_cams, std::set const& sci_cam_timestamps, double max_iso_times_exposure, boost::shared_ptr sparse_map, bool use_brisk_map, bool do_haz_cam_image, @@ -1104,6 +1131,7 @@ void saveCameraPoses( // This is used to ensure cameras are not too close Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); + Eigen::Affine3d prev_trans = Eigen::Affine3d::Identity(); // Compute the starting bag time as the timestamp of the first cloud for (auto& m : haz_cam_points_msgs) { @@ -1206,18 +1234,26 @@ void saveCameraPoses( // Success localizing. Update the time at which it took place for next time. prev_time = curr_time; - std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << " seconds.\n"; // See if to skip some images if the robot did not move much Eigen::Affine3d curr_trans; curr_trans.matrix() = desired_cam_to_world_trans; Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); - if (!std::isnan(prev_cam_ctr[0]) && !custom_sci_timestamps && - curr_dist_bw_cams < dist_between_processed_cams) { - std::cout << "Distance to previously processed camera is " - << curr_dist_bw_cams << ", skipping it." - << std::endl; + Eigen::Affine3d world_pose_change = curr_trans * (prev_trans.inverse()); + double angle_diff = dense_map::maxRotationAngle(world_pose_change); + if (!std::isnan(prev_cam_ctr[0])) + std::cout << std::setprecision(4) + << "Position and orientation changes relative to prev. 
processed cam are " + << curr_dist_bw_cams << " m and " << angle_diff <<" deg.\n"; + + bool skip_processing = (!std::isnan(prev_cam_ctr[0]) && + !custom_sci_timestamps && + curr_dist_bw_cams < dist_between_processed_cams && + angle_diff < angle_between_processed_cams); + if (skip_processing) { + std::cout << "Skipping this camera." << std::endl; continue; } @@ -1280,7 +1316,8 @@ void saveCameraPoses( simulated_data, depth_exclude_columns, depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, - reliability_weight_exponent, median_filter_params, + reliability_weight_exponent, max_ray_length, + voxel_size, median_filter_params, save_debug_data, filename_buffer, desired_cam_to_world_trans); @@ -1290,6 +1327,7 @@ void saveCameraPoses( // Update the previous camera center prev_cam_ctr = curr_cam_ctr; + prev_trans = curr_trans; } std::cout << "Wrote: " << index_file << std::endl; @@ -1355,12 +1393,15 @@ DEFINE_double(sampling_spacing_seconds, 0.5, "Spacing to use, in seconds, between consecutive depth images in " "the bag that are processed."); DEFINE_double(dist_between_processed_cams, 0.1, - "Once an image or depth image is processed, how far the camera " - "should move (in meters) before it should process more data."); + "Once an image or depth cloud is processed, " + "process a new one whenever either the camera moves by more than this " + "distance, in meters, or the angle changes by more than " + "--angle_between_processed_cams, in degrees."); +DEFINE_double(angle_between_processed_cams, 5.0, "See --dist_between_processed_cams."); DEFINE_string(sci_cam_timestamps, "", - "Process only these sci cam timestamps (rather than " - "any in the bag using --dist_between_processed_cams, etc.). Must be " - "a file with one timestamp per line."); + "Process only these sci cam timestamps (rather than any in the bag using " + "--dist_between_processed_cams, etc.). 
Must be a file with one timestamp " + "per line."); DEFINE_double(max_iso_times_exposure, 5.1, "Apply the inverse gamma transform to " "images, multiply them by max_iso_times_exposure/ISO/exposure_time " @@ -1388,6 +1429,12 @@ DEFINE_double(depth_hole_fill_diameter, 30.0, DEFINE_double(reliability_weight_exponent, 2.0, "A larger value will give more weight to depth points corresponding to " "pixels closer to depth image center, which are considered more reliable."); +DEFINE_double(max_ray_length, 2.0, + "Process haz cam depth image points no further than " + "this distance from the camera."); +DEFINE_double(voxel_size, 0.01, + "The grid size used for binning depth cloud points and creating the mesh. " + "Measured in meters."); DEFINE_bool(simulated_data, false, "If specified, use data recorded in simulation. " "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); @@ -1613,13 +1660,15 @@ int main(int argc, char** argv) { nav_to_cam_trans[it], haz_cam_depth_to_image_transform, // NOLINT FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, // NOLINT FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, // NOLINT - FLAGS_reliability_weight_exponent, median_filter_params, // NOLINT + FLAGS_reliability_weight_exponent, FLAGS_max_ray_length, // NOLINT + FLAGS_voxel_size, median_filter_params, // NOLINT bag_handles[cam_type]->bag_msgs, // desired cam msgs // NOLINT *nav_cam_msgs_ptr, // nav msgs // NOLINT haz_cam_points_handle.bag_msgs, nav_cam_bag_timestamps, // NOLINT sci_cam_exif, FLAGS_output_dir, // NOLINT cam_dirs[cam_type], FLAGS_start, FLAGS_duration, // NOLINT FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, // NOLINT + FLAGS_angle_between_processed_cams, // NOLINT sci_cam_timestamps, FLAGS_max_iso_times_exposure, // NOLINT sparse_map, FLAGS_use_brisk_map, // NOLINT do_haz_cam_image, sim_cam_poses[it], FLAGS_save_debug_data); // NOLINT diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py 
b/dense_map/geometry_mapper/tools/geometry_mapper.py index 7166adb8..fc265d12 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -115,8 +115,15 @@ def process_args(args): "--dist_between_processed_cams", dest="dist_between_processed_cams", default="0.1", - help="Once an image or depth image is processed, how far the camera " - + "should move (in meters) before it should process more data.", + help="Once an image or depth cloud is processed, process a new one whenever either " + + "the camera moves by more than this distance, in meters, or the angle changes by more " + + "than --angle_between_processed_cams, in degrees.", + ) + parser.add_argument( + "--angle_between_processed_cams", + dest="angle_between_processed_cams", + default="5.0", + help="See --dist_between_processed_cams.", ) parser.add_argument( "--sci_cam_timestamps", @@ -180,8 +187,8 @@ def process_args(args): "--voxel_size", dest="voxel_size", default="0.01", - help="When fusing the depth point clouds use a voxel of this size, " - + "in meters.", + help="The grid size used for binning depth cloud points and creating the mesh. 
" + + "Measured in meters.", ) parser.add_argument( "--voxblox_integrator", @@ -478,6 +485,8 @@ def compute_poses_and_clouds(geometry_mapper_path, args): args.sampling_spacing_seconds, "--dist_between_processed_cams", args.dist_between_processed_cams, + "--angle_between_processed_cams", + args.angle_between_processed_cams, "--max_iso_times_exposure", args.max_iso_times_exposure, "--depth_exclude_columns", @@ -490,6 +499,10 @@ def compute_poses_and_clouds(geometry_mapper_path, args): args.depth_hole_fill_diameter, "--reliability_weight_exponent", args.reliability_weight_exponent, + "--max_ray_length", + args.max_ray_length, + "--voxel_size", + args.voxel_size, "--median_filters", args.median_filters, ] + args.localization_options.split(" ") diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index 5543c951..068eab5e 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -91,7 +91,7 @@ class StreamingMapper { void SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr const& exif); void CompressedTextureCallback(const sensor_msgs::CompressedImageConstPtr& msg); void UncompressedTextureCallback(const sensor_msgs::ImageConstPtr& msg); - void AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_pose); + void AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_to_world); void WipeOldImages(); std::shared_ptr image_transport; @@ -134,13 +134,14 @@ class StreamingMapper { // Data indexed by timestamp std::map nav_cam_localization_poses; std::map nav_cam_landmark_poses; - std::map texture_cam_poses; + std::map texture_cam_to_world_map; std::map texture_cam_images; std::map > sci_cam_exif; // For processing pictures as the camera moves along - double dist_between_processed_cams, max_iso_times_exposure; - Eigen::Vector3d m_last_processed_cam_ctr; + double m_dist_between_processed_cams, 
m_angle_between_processed_cams, m_max_iso_times_exposure; + Eigen::Vector3d m_prev_cam_ctr; + Eigen::Affine3d m_prev_cam_to_world; std::vector m_smallest_cost_per_face; int m_processed_camera_count; double num_exclude_boundary_pixels; @@ -167,7 +168,8 @@ StreamingMapper::StreamingMapper() m_nav_cam_to_texture_cam_timestamp_offset(0.0), m_texture_cam_to_nav_cam_trans(Eigen::MatrixXd::Identity(4, 4)), m_nav_cam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), - m_last_processed_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), + m_prev_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), + m_prev_cam_to_world(Eigen::Affine3d::Identity()), m_processed_camera_count(0) {} StreamingMapper::~StreamingMapper(void) { thread->join(); } @@ -217,10 +219,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { m_nav_cam_to_texture_cam_timestamp_offset = nav_to_cam_timestamp_offsets[0]; m_texture_cam_params = cam_params[0]; - std::cout << "nav_cam_to_texture_cam_timestamp_offset: " - << m_nav_cam_to_texture_cam_timestamp_offset << "\n"; - std::cout << "texture cam focal vector: " - << m_texture_cam_params.GetFocalVector().transpose() << "\n"; + ROS_INFO_STREAM("nav_cam_to_texture_cam_timestamp_offset: " + << m_nav_cam_to_texture_cam_timestamp_offset << "\n"); } } @@ -320,9 +320,11 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { if (!config_params.GetStr("texture_cam_type", &m_texture_cam_type)) ROS_FATAL("Could not read the texture_cam_type parameter."); if (!config_params.GetStr("mesh_file", &mesh_file)) ROS_FATAL("Could not read the mesh_file parameter."); - if (!config_params.GetReal("dist_between_processed_cams", &dist_between_processed_cams)) + if (!config_params.GetReal("dist_between_processed_cams", &m_dist_between_processed_cams)) ROS_FATAL("Could not read the dist_between_processed_cams parameter."); - if (!config_params.GetReal("max_iso_times_exposure", &max_iso_times_exposure)) + if 
(!config_params.GetReal("angle_between_processed_cams", &m_angle_between_processed_cams)) + ROS_FATAL("Could not read the angle_between_processed_cams parameter."); + if (!config_params.GetReal("max_iso_times_exposure", &m_max_iso_times_exposure)) ROS_FATAL("Could not read the max_iso_times_exposure parameter."); if (!config_params.GetReal("pixel_size", &pixel_size)) ROS_FATAL("Could not read the pixel_size parameter."); if (!config_params.GetReal("num_exclude_boundary_pixels", &num_exclude_boundary_pixels)) @@ -358,17 +360,18 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { << "its distortion is not modeled.\n"; ROS_INFO_STREAM("Streaming mapper parameters:"); - ROS_INFO_STREAM("dist_between_processed_cams = " << dist_between_processed_cams); - ROS_INFO_STREAM("max_iso_times_exposure = " << max_iso_times_exposure); - ROS_INFO_STREAM("use_single_texture = " << use_single_texture); - ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); - ROS_INFO_STREAM("pixel_size = " << pixel_size); - ROS_INFO_STREAM("num_threads = " << num_threads); - ROS_INFO_STREAM("sim_mode = " << m_sim_mode); - ROS_INFO_STREAM("save_to_disk = " << save_to_disk); - ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); - ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); - ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); + ROS_INFO_STREAM("dist_between_processed_cams = " << m_dist_between_processed_cams); + ROS_INFO_STREAM("angle_between_processed_cams = " << m_angle_between_processed_cams); + ROS_INFO_STREAM("max_iso_times_exposure = " << m_max_iso_times_exposure); + ROS_INFO_STREAM("use_single_texture = " << use_single_texture); + ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); + ROS_INFO_STREAM("pixel_size = " << pixel_size); + ROS_INFO_STREAM("num_threads = " << num_threads); + ROS_INFO_STREAM("sim_mode = " << m_sim_mode); + ROS_INFO_STREAM("save_to_disk = " << 
save_to_disk); + ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); + ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); + ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); // For a camera like sci_cam, the word "sci" better be part of texture_cam_topic. std::string cam_name = m_texture_cam_type.substr(0, 3); @@ -529,16 +532,16 @@ void StreamingMapper::TextureCamSimPoseCallback(const geometry_msgs::PoseStamped const std::lock_guard lock(texture_cam_pose_lock); // Convert the pose to Eigen::Affine3d - Eigen::Affine3d texture_cam_pose; - tf::poseMsgToEigen(pose->pose, texture_cam_pose); - texture_cam_poses[pose->header.stamp.toSec()] = texture_cam_pose; + Eigen::Affine3d texture_cam_to_world; + tf::poseMsgToEigen(pose->pose, texture_cam_to_world); + texture_cam_to_world_map[pose->header.stamp.toSec()] = texture_cam_to_world; // std::cout << "Received the sim texture cam pose at time: " << pose->header.stamp.toSec() // << "\n"; // Wipe the oldest poses. Keep a lot of them as sometimes they can come very often. - while (texture_cam_poses.size() > 100) { - texture_cam_poses.erase(texture_cam_poses.begin()); + while (texture_cam_to_world_map.size() > 100) { + texture_cam_to_world_map.erase(texture_cam_to_world_map.begin()); } } @@ -576,8 +579,9 @@ void StreamingMapper::TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::C // Compute the texture cam pose and timestamp based on the nav cam pose // and timestamp, and save it in the storage. 
-void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_pose) { - if (nav_cam_pose.matrix() == Eigen::Matrix::Identity()) { +void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, + Eigen::Affine3d const& nav_cam_to_world) { + if (nav_cam_to_world.matrix() == Eigen::Matrix::Identity()) { // Skip bad poses return; } @@ -586,19 +590,20 @@ void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3 // have some time delay among them double texture_cam_timestamp = nav_cam_timestamp + m_nav_cam_to_texture_cam_timestamp_offset; - // Keep in mind that nav_cam_pose is the transform from the nav cam + // Keep in mind that nav_cam_to_world is the transform from the nav cam // to the world. Hence the matrices are multiplied in the order as // below. - Eigen::Affine3d texture_cam_pose; - texture_cam_pose.matrix() = nav_cam_pose.matrix() * m_texture_cam_to_nav_cam_trans; + Eigen::Affine3d texture_cam_to_world; + texture_cam_to_world.matrix() = nav_cam_to_world.matrix() * m_texture_cam_to_nav_cam_trans; { // Add the latest pose. Use a lock. const std::lock_guard lock(texture_cam_pose_lock); - texture_cam_poses[texture_cam_timestamp] = texture_cam_pose; + texture_cam_to_world_map[texture_cam_timestamp] = texture_cam_to_world; // Wipe the oldest poses. Keep a lot of them, as sometimes, particularly for ekf, // they can come very fast - while (texture_cam_poses.size() > 100) texture_cam_poses.erase(texture_cam_poses.begin()); + while (texture_cam_to_world_map.size() > 100) + texture_cam_to_world_map.erase(texture_cam_to_world_map.begin()); } // TODO(oalexan1): This will need more testing! 
} @@ -608,10 +613,10 @@ void StreamingMapper::LandmarkCallback(ff_msgs::VisualLandmarks::ConstPtr const& if (m_sim_mode) return; double nav_cam_timestamp = vl->header.stamp.toSec(); - Eigen::Affine3d nav_cam_pose; - tf::poseMsgToEigen(vl->pose, nav_cam_pose); + Eigen::Affine3d nav_cam_to_world; + tf::poseMsgToEigen(vl->pose, nav_cam_to_world); - StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); + StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world); } // This callback takes as input the robot pose as output by ekf on @@ -626,9 +631,9 @@ void StreamingMapper::EkfStateCallback(ff_msgs::EkfState::ConstPtr const& ekf_st // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform) // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as: // nav_cam_to_world = body_to_world * nav_cam_to_body - Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; - StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); + Eigen::Affine3d nav_cam_to_world; + nav_cam_to_world.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; + StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world); } // This callback takes as input the robot pose as output by ekf on /loc/pose. 
@@ -642,9 +647,9 @@ void StreamingMapper::EkfPoseCallback(geometry_msgs::PoseStamped::ConstPtr const // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform) // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as: // nav_cam_to_world = body_to_world * nav_cam_to_body - Eigen::Affine3d nav_cam_pose; - nav_cam_pose.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; - StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose); + Eigen::Affine3d nav_cam_to_world; + nav_cam_to_world.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans; + StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world); } // This callback takes as input the robot pose as output by ekf on /loc/pose. @@ -743,12 +748,9 @@ void StreamingMapper::UncompressedTextureCallback(const sensor_msgs::ImageConstP WipeOldImages(); } -// Iterate over the texture images. Overlay them on the 3D model and publish -// the result. -// Need to consider here if they are -// full res and compressed and color, or 1/4 res and grayscale and -// and uncompressed -// +// Iterate over the texture images. Overlay them on the 3D model and +// publish the result. Need to consider here if they are full res and +// compressed and color, or 1/4 res and grayscale and and uncompressed. void StreamingMapper::ProcessingLoop() { double last_attempted_texture_cam_timestamp = -1.0; @@ -772,10 +774,10 @@ void StreamingMapper::ProcessingLoop() { // Safely copy the data we need (this data is small, so copying is cheap). 
- std::map local_texture_cam_poses; + std::map local_texture_cam_to_world_map; { const std::lock_guard lock(texture_cam_pose_lock); - local_texture_cam_poses = texture_cam_poses; + local_texture_cam_to_world_map = texture_cam_to_world_map; } camera::CameraParameters local_texture_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), @@ -812,16 +814,16 @@ void StreamingMapper::ProcessingLoop() { break; } - if (local_texture_cam_poses.empty()) { + if (local_texture_cam_to_world_map.empty()) { break; // no poses yet } // Ensure that we can bracket the texture image timestamp between // the timestamps of the poses - if (texture_cam_image_timestamp > local_texture_cam_poses.rbegin()->first) { + if (texture_cam_image_timestamp > local_texture_cam_to_world_map.rbegin()->first) { continue; } - if (texture_cam_image_timestamp < local_texture_cam_poses.begin()->first) { + if (texture_cam_image_timestamp < local_texture_cam_to_world_map.begin()->first) { continue; } @@ -860,26 +862,32 @@ void StreamingMapper::ProcessingLoop() { last_attempted_texture_cam_timestamp = texture_cam_image_timestamp; // Find the interpolated pose at the current image - Eigen::Affine3d curr_texture_cam_pose; + Eigen::Affine3d curr_cam_to_world; - if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_poses, - curr_texture_cam_pose)) + if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_to_world_map, + curr_cam_to_world)) LOG(FATAL) << "Could not bracket the timestamp. 
Should have worked at this stage."; - camera::CameraModel cam(curr_texture_cam_pose.inverse(), local_texture_cam_params); + camera::CameraModel cam(curr_cam_to_world.inverse(), local_texture_cam_params); - Eigen::Vector3d cam_ctr = cam.GetPosition(); - double dist_to_prev_processed = (m_last_processed_cam_ctr - cam_ctr).norm(); - bool do_process = (std::isnan(dist_to_prev_processed) || // for the first camera to process - dist_to_prev_processed > dist_between_processed_cams); + Eigen::Vector3d curr_cam_ctr = cam.GetPosition(); - // if (!std::isnan(dist_to_prev_processed)) - // ROS_INFO_STREAM("Distance from current camera to last processed camera: " - // << dist_to_prev_processed << " meters."); + double curr_dist_bw_cams = (m_prev_cam_ctr - curr_cam_ctr).norm(); + Eigen::Affine3d world_pose_change + = curr_cam_to_world * (m_prev_cam_to_world.inverse()); + double angle_diff = dense_map::maxRotationAngle(world_pose_change); - // Current camera is too close to previously processed camera, hence wait for the bot to move - if (!do_process) { - // ROS_INFO_STREAM("Won't process this camera image, as it is too close to previous one."); + // Useful for testing + // if (!std::isnan(m_prev_cam_ctr[0])) + // std::cout << std::setprecision(4) + // << "Position and orientation changes relative to prev. processed cam are " + // << curr_dist_bw_cams << " m and " << angle_diff <<" deg.\n"; + + bool skip_processing = (!std::isnan(m_prev_cam_ctr[0]) && + curr_dist_bw_cams < m_dist_between_processed_cams && + angle_diff < m_angle_between_processed_cams); + if (skip_processing) { + // std::cout << "Skipping this camera." 
<< std::endl; continue; } @@ -902,12 +910,13 @@ void StreamingMapper::ProcessingLoop() { std::ostringstream oss; oss << "processed_" << 1000 + m_processed_camera_count; // add 1000 to list them nicely std::string out_prefix = oss.str(); - publishTexturedMesh(mesh, bvh_tree, max_iso_times_exposure, iso, exposure, + publishTexturedMesh(mesh, bvh_tree, m_max_iso_times_exposure, iso, exposure, m_processed_camera_count, texture_cam_image, texture_cam_image_timestamp, cam, m_smallest_cost_per_face, out_prefix); // Save this for next time - m_last_processed_cam_ctr = cam_ctr; + m_prev_cam_ctr = curr_cam_ctr; + m_prev_cam_to_world = curr_cam_to_world; m_processed_camera_count++; std::cout << "Texture processing took " << timer.get_elapsed() / 1000.0 << " seconds\n"; diff --git a/isaac/config/dense_map/streaming_mapper.config b/isaac/config/dense_map/streaming_mapper.config index 5085b45e..5c7cd660 100644 --- a/isaac/config/dense_map/streaming_mapper.config +++ b/isaac/config/dense_map/streaming_mapper.config @@ -44,22 +44,31 @@ nav_cam_pose_topic = ""; -- A backup topic to get the pose from, rarely used ekf_pose_topic = ""; +-- Type of camera to texture, and its image topic + texture_cam_type = "sci_cam"; +texture_cam_topic = "/hw/cam_sci/compressed"; + +-- texture_cam_type = "haz_cam"; +-- texture_cam_topic = "/hw/depth_haz/extended/amplitude_int"; + +-- texture_cam_type = "nav_cam"; +-- texture_cam_topic = "/hw/cam_nav"; + +-- texture_cam_type = "heat_cam"; +-- texture_cam_topic = "/hw/cam_heat"; --- Image topic for the camera to texture -texture_cam_topic = "/hw/cam_sci/compressed"; -- sci_cam --- texture_cam_topic = "/hw/depth_haz/extended/amplitude_int"; -- haz_cam --- texture_cam_topic = "/hw/cam_nav"; -- nav_cam --- texture_cam_topic = "/hw/cam_heat"; -- heat_cam --- texture_cam_topic = "/hw/cam_acoustics"; -- acoustics_cam +-- texture_cam_type = "acoustics_cam"; +-- texture_cam_topic = "/hw/cam_acoustics"; -dist_between_processed_cams = 0.10; 
-max_iso_times_exposure = 5.1; -sci_cam_exposure_correction = true; +dist_between_processed_cams = 0.10; +angle_between_processed_cams = 5.0; +max_iso_times_exposure = 5.1; +sci_cam_exposure_correction = true; -use_single_texture = true; -pixel_size = 0.001; -num_threads = 48; +use_single_texture = true; +pixel_size = 0.001; +num_threads = 48; -- This is measured at full resolution, not the reduced resolution -- used in calibration. From 3a50d508e3c331449a818b19d46d5e9b77de7813 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Fri, 7 Jan 2022 11:35:38 -0800 Subject: [PATCH 23/40] Minor update to the geometry mapper doc to use hyperlinks --- dense_map/geometry_mapper/readme.md | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index c433f740..e0ca7e85 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -620,20 +620,18 @@ do: ## Map building and registration Build and register a SURF sparse map with the nav cam images. (This is -needed only with real data.) - -An example for how to collect images and build a map is shown later in -this doc, in the section about camera refinement. See also the -reference documentation in - - $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md - -and also in build_map.md in that repository. +needed only with real data.) See the +[sparse mapping](https://nasa.github.io/astrobee/html/sparsemapping.html) +documentation in the Astrobee repository, with more details given in +the [map building](https://nasa.github.io/astrobee/html/map_building.html) +page. If the map to be built is large, consider using the Theia SfM -software. Its usage is described in: +software. See the [Theia documentation](https://nasa.github.io/astrobee/html/theia_map.html) +for how to use this package to create Astrobee sparse maps. 
- https://nasa.github.io/astrobee/html/theia_map.html +An example for how to build a map in the context of calibration is +also given further down this document. This SURF map will be used with the geometry mapper. Rebuild it with BRISK features, to be used with the streaming mapper. Examine the From 748758d503c9ed6bf8f1d5ab7e380770d9c7d49b Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Fri, 7 Jan 2022 13:20:01 -0800 Subject: [PATCH 24/40] geometry_mapper: Very minor formating fixes --- dense_map/geometry_mapper/readme.md | 76 ++++++++++++++--------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index e0ca7e85..60dad495 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -383,7 +383,7 @@ builds upon the instructions used in the doc referenced right above.) Note that above we assume that the image sampler was used to collect a subset of the nav cam images. Otherwise the nav cam topic would be -/hw/cam_nav. +``/hw/cam_nav``. This will create three directories with the corners extracted from the nav, haz, and sci cameras. @@ -636,8 +636,8 @@ also given further down this document. This SURF map will be used with the geometry mapper. Rebuild it with BRISK features, to be used with the streaming mapper. Examine the BRISK obtained map. If it does not have enough features, rebuild it -with a lower value of -default_brisk_threshold and --max_brisk_threshold (For example, use 70 instead of the default of +with a lower value of ``--default_brisk_threshold`` and +``--max_brisk_threshold`` (For example, use 70 instead of the default of 90. This may make the sparse map bigger.) It is suggested to not use the ``--histogram_equalization`` flag for the @@ -683,7 +683,7 @@ Ensure that the bot name is correct below. 
Set ``ASTROBEE_SOURCE_PATH``, --sparse_map nav_data.map \ --camera_types "sci_cam nav_cam haz_cam" \ --camera_topics "/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record /hw/depth_haz/extended/amplitude_int"\ - --undistorted_crop_wins "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160" \ + --undistorted_crop_wins "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200" \ --haz_cam_points_topic /hw/depth_haz/points \ --start 0 \ --duration 1e+10 \ @@ -724,7 +724,7 @@ Parameters: numbers are at 1/4th of the full resolution (resolution of calibration) and will be adjusted for the actual input image dimensions. Use a list in quotes. The default is - "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160". + "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200". --haz_cam_points_topic: The depth point cloud topic in the bag file. --start: How many seconds into the bag to start processing the data. --duration: For how many seconds to do the processing. @@ -858,13 +858,13 @@ restricted to the range of timestamps contained within the sparse map `--duration``). If this tool is too slow, or if localization fails, consider adjusting -the --localization_options above. For example, to make localization +the ``--localization_options`` above. For example, to make localization work better (which will make the program slower) decrease the value of ---default_surf_threshold, and increase --early_break_landmarks, ---min_surf_features, and --max_surf_features. To make it faster, +``--default_surf_threshold``, and increase ``--early_break_landmarks``, +``--min_surf_features``, and ``--max_surf_features``. To make it faster, do the opposite. -The values of --depth_exclude_columns and --depth_exclude_rows +The values of ``--depth_exclude_columns`` and ``--depth_exclude_rows`` can be adjusted to remove rows and columns at the margins which may result in a nicer mesh. 
If, for example, the bot moves upwards or downwards, there is little loss in removing @@ -874,18 +874,18 @@ with good lateral overlap, removing some columns won't result in much loss but may remove some noise. If it is desired to use only a precise subset of the sci cam images, -specify those with the option --sci_cam_timestamps. +specify those with the option ``--sci_cam_timestamps``. If several acquisitions were performed, and the geometry mapper was run with each of them, those can be merged by invoking the -geometry mapper with the option --merge_maps. +geometry mapper with the option ``--merge_maps``. The geometry mapper can run with a previously created mesh if invoked -with the option --external_mesh. +with the option ``--external_mesh``. The most time-consuming part of the geometry mapper is computing the initial poses, which is the earliest step, or step 0. To resume the -geometry mapper at any step, use the option '--start_step num'. For +geometry mapper at any step, use the option ``--start_step num``. For example, one may want to apply further smoothing to the mesh or more hole-filling, before resuming with the next steps. @@ -979,7 +979,7 @@ To run the streaming mapper with real data for the given bot, do: Wait until it finishes forming the texture model, which may take 30 seconds on more. -Ensure that the ASTROBEE_ROBOT name is correct above. +Ensure that the ``ASTROBEE_ROBOT`` name is correct above. This node will load the mesh produced by the geometry mapper from @@ -1005,11 +1005,11 @@ default, is ``sci_cam``. 
Its image topic must be set, which, for while for ``haz_cam`` is: - /hw/depth_haz/extended/amplitude_int + /hw/depth_haz/extended/amplitude_int For ``nav_cam`` the image topic is either - /mgt/img_sampler/nav_cam/image_record + /mgt/img_sampler/nav_cam/image_record if the images are played from a bag which recorded the images produced with the image sampler, or @@ -1239,8 +1239,8 @@ sci_cam, are: /sim/sci_cam/pose /sim/sci_cam/info -Hence, the parameters ekf_state_topic, ekf_pose_topic, and -nav_cam_pose_topic are ignored. +Hence, the parameters ``ekf_state_topic``, ``ekf_pose_topic``, and +``nav_cam_pose_topic`` are ignored. The streaming mapper will publish its results on topics mentioned earlier in the text. @@ -1470,22 +1470,22 @@ This tool will print some statistics showing the residual errors before and after each optimization pass (before outlier removal at the end of the pass), as follows: -The 25, 50, 75, and 100th percentile residual stats after opt -depth_mesh_x_m: 0.0013406233 0.0032581503 0.008524976 1.1929607 (3551 residuals) -depth_mesh_y_m: 0.0013847081 0.0030440421 0.0069465355 1.8053954 (3551 residuals) -depth_mesh_z_m: 0.00095448611 0.001959229 0.003519088 0.37953008 (3551 residuals) -depth_tri_x_m: 0.0016825461 0.0038125877 0.0081855522 2.3030685 (3573 residuals) -depth_tri_y_m: 0.0018114713 0.0042167015 0.0095410033 2.5599911 (3573 residuals) -depth_tri_z_m: 0.0012253625 0.0026915793 0.0054980693 0.82675259 (3573 residuals) -haz_cam_pix_x: 0.27697292 0.5747702 1.1103665 74339.066 (3716 residuals) -haz_cam_pix_y: 0.11680463 0.28538243 0.63062182 26071.257 (3716 residuals) -mesh_tri_x_m: 0.00054776584 0.0019058208 0.0062554694 4.7597716 (24034 residuals) -mesh_tri_y_m: 0.0004664598 0.0018264008 0.0061599388 3.669907 (24034 residuals) -mesh_tri_z_m: 0.00015713815 0.00064641858 0.0021773666 1.062293 (24034 residuals) -nav_cam_pix_x: 0.080328781 0.21718455 0.48093808 618.45992 (206598 residuals) -nav_cam_pix_y: 0.096269135 0.23056959 0.4939937 
476.99626 (206598 residuals) -sci_cam_pix_x: 0.048502913 0.23643751 0.53009189 20.129477 (623 residuals) -sci_cam_pix_y: 0.21452953 0.47693424 1.0187402 25.362118 (623 residuals) + The 25, 50, 75, and 100th percentile residual stats after opt + depth_mesh_x_m: 0.0015756 0.004064 0.0099792 1.1882 (3566 residuals) + depth_mesh_y_m: 0.0015745 0.0036734 0.0088576 1.8041 (3566 residuals) + depth_mesh_z_m: 0.00091998 0.0019874 0.0038565 0.37353 (3566 residuals) + depth_tri_x_m: 0.00066995 0.0021602 0.0065155 2.7389 (3578 residuals) + depth_tri_y_m: 0.00069529 0.0022702 0.0069424 2.6967 (3578 residuals) + depth_tri_z_m: 0.00051406 0.0016069 0.0044358 1.1036 (3578 residuals) + haz_cam_pix_x: 0.23413 0.51717 1.0606 10353 (3723 residuals) + haz_cam_pix_y: 0.14605 0.33521 0.67331 1040.7 (3723 residuals) + mesh_tri_x_m: 0.0030461 0.0097438 0.023452 4.7647 (24231 residuals) + mesh_tri_y_m: 0.0027831 0.0085864 0.020538 3.1248 (24231 residuals) + mesh_tri_z_m: 0.00088227 0.0026434 0.0063236 1.1076 (24231 residuals) + nav_cam_pix_x: 0.055205 0.15801 0.37068 36.164 (206561 residuals) + nav_cam_pix_y: 0.096944 0.23234 0.49844 495.14 (206561 residuals) + sci_cam_pix_x: 0.020412 0.10584 0.29013 38.499 (635 residuals) + sci_cam_pix_y: 0.1585 0.34267 0.71541 30.158 (635 residuals) These can be helpful in figuring out if the calibration result is good. The errors whose name ends in "_m" are in meters and measure @@ -1504,9 +1504,9 @@ nav_cam_to_sci_cam_timestamp_offset, which can be non-zero if the HLP and MLP/LLP processors are not synchronized (the sci_cam pictures are acquired with the HLP and nav_cam with MLP/LLP). If this value is not known well, this tool can be run with zero or more iterations and -various values of +various values of: - --nav_cam_to_sci_cam_offset_override_value + --nav_cam_to_sci_cam_offset_override_value to see which value gives the smallest residuals. 
@@ -1796,9 +1796,9 @@ can be run as follows: --num_exclude_boundary_pixels 0 \ --output_prefix out -This will write out-1616785318.1400001.obj and its associated files. +This will write ``out-1616785318.1400001.obj`` and its associated files. -Ensure that the correct robot is specified in ASTROBEE_ROBOT. +Ensure that the correct robot is specified in ``ASTROBEE_ROBOT``. Alternatively, the images and cameras can be specified in lists, via ``--image_list`` and ``--camera_list``. From 00f169a9fc3d8dab43f217cba6e9a1e329afbb91 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 13 Jan 2022 11:49:24 -0800 Subject: [PATCH 25/40] Use long instead of int in texture processing to avoid int32 overflow (#24) * Update to a fix I made in texrecon * doc: Mentino about what it takes to add new cameras * add_sci_cam_to_bag: Fix issue with message time being out of sync with message header time * Add --no_erode_boundary option * image_picker: Speedup * Use long instead of int in texture processing to avoid int32 overflow --- dense_map/geometry_mapper/CMakeLists.txt | 7 +- .../include/texture_processing.h | 142 +++--- dense_map/geometry_mapper/readme.md | 97 +++- .../geometry_mapper/src/texture_processing.cc | 413 ++++++++++-------- .../tools/add_sci_cam_to_bag.cc | 29 +- .../geometry_mapper/tools/camera_refiner.cc | 2 +- .../geometry_mapper/tools/geometry_mapper.cc | 16 +- .../geometry_mapper/tools/geometry_mapper.py | 29 +- .../geometry_mapper/tools/image_picker.cc | 9 +- .../geometry_mapper/tools/streaming_mapper.cc | 30 +- 10 files changed, 466 insertions(+), 308 deletions(-) diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 9610b9a2..2af71ed8 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -91,10 +91,11 @@ set(TEXRECON_DIR ${CMAKE_BINARY_DIR}/texrecon) ExternalProject_Add(texrecon PREFIX ${TEXRECON_DIR} GIT_REPOSITORY 
https://github.com/oleg-alexandrov/mvs-texturing.git - GIT_TAG b6bac86 + GIT_TAG 015a2ac CMAKE_ARGS -DCMAKE_VERBOSE_MAKEFILE=TRUE -DCMAKE_CXX_FLAGS=-fPIC - BUILD_COMMAND $(MAKE) # Per: https://cmake.org/pipermail/cmake/2011-April/043772.html - # Set some options below to not have it always recreate CMakeLists files + BUILD_COMMAND $(MAKE) + # Per: https://cmake.org/pipermail/cmake/2011-April/043772.html, + # set some options below to not have it always recreate CMakeLists files BUILD_ALWAYS 0 UPDATE_COMMAND "" INSTALL_COMMAND cmake -E echo "Skipping install step for texrecon." diff --git a/dense_map/geometry_mapper/include/texture_processing.h b/dense_map/geometry_mapper/include/texture_processing.h index 373131d4..d64bcbdf 100644 --- a/dense_map/geometry_mapper/include/texture_processing.h +++ b/dense_map/geometry_mapper/include/texture_processing.h @@ -65,31 +65,46 @@ namespace dense_map { // and the number of samples needed (with given pixel size) to // sample that face (with a small padding on both sides). // Also store the transform from that plane to the face itself. + +// We use 'int64_t' values instead of 'int', as the latter is 32-bit and +// the area of some images we encounter can then overflow. + struct FaceInfo { double x, min_y, min_z; - Eigen::Affine3d YZPlaneToTriangleFace; - // We will have [min_y, min_y + width * pixel_size] x [min_z, min_z + height * pixel_size] // contain the face transformed in that plane with the normal pointing along the z axis. - int width, height; + int64_t width, height; - int padding; // The padding to apply to each face bounding box before - // sampling it + // The padding to apply to each face bounding box before sampling it + int64_t padding; // The pixel at (x, min_y, min_z) in the plane will end up at location (shift_u, shift_v) // in the texture. 
- int shift_u, shift_v; + int64_t shift_u, shift_v; + + // Used to flag a valid face + bool valid; + + // The transform which makes a version of the face in the y-z plane to the actual triangle + Eigen::Affine3d YZPlaneToTriangleFace; // The vertices of a face after being transformed to a plane with x constant std::vector TransformedVertices; - // Keep track of the fact that sometimes not all faces have their info - // initialized + // Initialize all members. Invalid or unprocessed faces will have + // shift_u == invalid_shift_u. FaceInfo() { - int max_int = std::numeric_limits::max(); - shift_u = max_int; - shift_v = max_int; + x = 0.0; + min_y = 0.0; + min_z = 0.0; + width = 0L; + height = 0L; + padding = 0L; + valid = true; + shift_u = std::numeric_limits::max(); + shift_v = 0L; + YZPlaneToTriangleFace = Eigen::Affine3d::Identity(); TransformedVertices.resize(3); } }; @@ -107,37 +122,37 @@ class IsaacTexturePatch { typedef std::vector Texcoords; private: - int label; + int64_t label; Faces faces; Texcoords texcoords; - int width, height; + int64_t width, height; public: /** Constructs a texture patch. 
*/ - IsaacTexturePatch(int _label, std::vector const& faces, std::vector const& texcoords, - int width, int height); + IsaacTexturePatch(int64_t label, std::vector const& faces, std::vector const& texcoords, + int64_t width, int64_t height); IsaacTexturePatch(IsaacTexturePatch const& texture_patch); static IsaacTexturePatch::Ptr create(IsaacTexturePatch::ConstPtr texture_patch); - static IsaacTexturePatch::Ptr create(int label, std::vector const& faces, - std::vector const& texcoords, int width, int height); + static IsaacTexturePatch::Ptr create(int64_t label, std::vector const& faces, + std::vector const& texcoords, int64_t width, int64_t height); IsaacTexturePatch::Ptr duplicate(void); std::vector& get_faces(void); std::vector const& get_faces(void) const; - std::vector& get_texcoords(void); + std::vector & get_texcoords(void); std::vector const& get_texcoords(void) const; - int get_label(void) const; - int get_width(void) const; - int get_height(void) const; - int get_size(void) const; + int64_t get_label(void) const; + int64_t get_width(void) const; + int64_t get_height(void) const; + int64_t get_size(void) const; }; -inline IsaacTexturePatch::IsaacTexturePatch(int label, std::vector const& faces, - std::vector const& texcoords, int width, int height) +inline IsaacTexturePatch::IsaacTexturePatch(int64_t label, std::vector const& faces, + std::vector const& texcoords, int64_t width, int64_t height) : label(label), faces(faces), texcoords(texcoords), width(width), height(height) {} IsaacTexturePatch::IsaacTexturePatch(IsaacTexturePatch const& texture_patch) { @@ -152,29 +167,35 @@ inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(IsaacTexturePatch::Const return std::make_shared(*texture_patch); } -inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(int label, std::vector const& faces, - std::vector const& texcoords, int width, - int height) { +inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(int64_t label, std::vector const& faces, + std::vector 
const& texcoords, int64_t width, + int64_t height) { return std::make_shared(label, faces, texcoords, width, height); } -inline IsaacTexturePatch::Ptr IsaacTexturePatch::duplicate(void) { return Ptr(new IsaacTexturePatch(*this)); } +inline IsaacTexturePatch::Ptr IsaacTexturePatch::duplicate(void) { + return Ptr(new IsaacTexturePatch(*this)); +} -inline int IsaacTexturePatch::get_label(void) const { return label; } +inline int64_t IsaacTexturePatch::get_label(void) const { return label; } -inline int IsaacTexturePatch::get_width(void) const { return width; } +inline int64_t IsaacTexturePatch::get_width(void) const { return width; } -inline int IsaacTexturePatch::get_height(void) const { return height; } +inline int64_t IsaacTexturePatch::get_height(void) const { return height; } -inline std::vector& IsaacTexturePatch::get_texcoords(void) { return texcoords; } +inline std::vector& IsaacTexturePatch::get_texcoords(void) { + return texcoords; +} inline std::vector& IsaacTexturePatch::get_faces(void) { return faces; } -inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { return texcoords; } +inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { + return texcoords; +} inline std::vector const& IsaacTexturePatch::get_faces(void) const { return faces; } -inline int IsaacTexturePatch::get_size(void) const { return get_width() * get_height(); } +inline int64_t IsaacTexturePatch::get_size(void) const { return get_width() * get_height(); } /** * Class representing a texture atlas. 
@@ -187,12 +208,12 @@ class IsaacTextureAtlas { typedef std::vector TexcoordIds; typedef std::vector Texcoords; - unsigned int get_width(); - unsigned int get_height(); + int64_t get_width(); + int64_t get_height(); private: - unsigned int width, height, determined_height; - unsigned int const padding; + int64_t width, height, determined_height; + int64_t const padding; bool finalized; Faces faces; @@ -206,9 +227,9 @@ class IsaacTextureAtlas { void resize_atlas(void); public: - IsaacTextureAtlas(unsigned int width, unsigned int height); + IsaacTextureAtlas(int64_t width, int64_t height); - static IsaacTextureAtlas::Ptr create(unsigned int width, unsigned int height); + static IsaacTextureAtlas::Ptr create(int64_t width, int64_t height); Faces& get_faces(void); TexcoordIds& get_texcoord_ids(void); @@ -224,28 +245,31 @@ class IsaacTextureAtlas { void finalize(void); }; -inline IsaacTextureAtlas::Ptr IsaacTextureAtlas::create(unsigned int width, unsigned int height) { +inline IsaacTextureAtlas::Ptr IsaacTextureAtlas::create(int64_t width, int64_t height) { return Ptr(new IsaacTextureAtlas(width, height)); } inline IsaacTextureAtlas::Faces& IsaacTextureAtlas::get_faces(void) { return faces; } -inline IsaacTextureAtlas::TexcoordIds& IsaacTextureAtlas::get_texcoord_ids(void) { return texcoord_ids; } +inline IsaacTextureAtlas::TexcoordIds& IsaacTextureAtlas::get_texcoord_ids(void) { + return texcoord_ids; +} inline IsaacTextureAtlas::Texcoords& IsaacTextureAtlas::get_texcoords(void) { return texcoords; } inline mve::ByteImage::Ptr& IsaacTextureAtlas::get_image(void) { return image; } -inline unsigned int IsaacTextureAtlas::get_width() { return width; } +inline int64_t IsaacTextureAtlas::get_width() { return width; } -inline unsigned int IsaacTextureAtlas::get_height() { return height; } +inline int64_t IsaacTextureAtlas::get_height() { return height; } // Load and prepare a mesh void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mesh, - 
std::shared_ptr& mesh_info, std::shared_ptr& graph, + std::shared_ptr& mesh_info, + std::shared_ptr& graph, std::shared_ptr& bvh_tree); -void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_threads, +void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads, // outputs std::vector& face_projection_info, std::vector& texture_atlases, tex::Model& model); @@ -255,7 +279,8 @@ void formObj(tex::Model& texture_model, std::string const& out_prefix, std::stri // Put an textured mesh obj file in a string void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& face_vec, - std::map const& uv_map, std::string const& out_prefix, std::string& obj_str); + std::map const& uv_map, + std::string const& out_prefix, std::string& obj_str); void formMtl(std::string const& out_prefix, std::string& mtl_str); @@ -274,21 +299,14 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Project texture on a texture model that was pre-filled already, so // only the texture pixel values need to be computed -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - cv::Mat const& image, camera::CameraModel const& cam, - std::vector& smallest_cost_per_face, - double pixel_size, int num_threads, - std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, - cv::Mat& out_texture); - -void meshProject(mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - cv::Mat const& image, - Eigen::Affine3d const& world_to_cam, - camera::CameraParameters const& cam_params, - int num_exclude_boundary_pixels, - std::string const& out_prefix); +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, + camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, + int64_t num_threads, std::vector const& face_projection_info, + std::vector& texture_atlases, tex::Model& model, 
cv::Mat& out_texture); + +void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, cv::Mat const& image, + Eigen::Affine3d const& world_to_cam, camera::CameraParameters const& cam_params, + int64_t num_exclude_boundary_pixels, std::string const& out_prefix); // Save a model void isaac_save_model(ObjModel* obj_model, std::string const& prefix); diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 60dad495..8f6c0360 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -109,6 +109,60 @@ The acoustics camera and how to enable it is documented at: $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md +## Adding support for new cameras + +For an actual camera, rather than a simulated one, the files: + + $ASTROBEE_SOURCE_PATH/astrobee/config/cameras.config + $ASTROBEE_SOURCE_PATH/astrobee/config/geometry.config + $ASTROBEE_SOURCE_PATH/astrobee/config/transforms.config + $ASTROBEE_SOURCE_PATH/astrobee/config/robots/robotName.config + +should be modified and entities added, mirroring ``sci_cam``, so having +fields for intrinsics, extrinsics (nav cam to given cam transform), +timestamp offset (by default it should be 0), and body to given camera +transform. + +Note the first three config files above are used by any robot, while +the last one refers only to the robot that is desired to use with the +new camera. The Lua-based processing of config files is tolerant of +the fact that a different robot config file lacks a new camera's +settings even if the shared config files define them. For a camera +that is expected to be present on all robots, each robot config file +should be changed, but otherwise just a desired robot's file can be +modified, in addition to the shared files. + +For a simulated camera, the above is not necessary. 
Instead, that +camera must define the topics: + + /sim/some_cam/pose + /sim/some_cam/info + +where the desired camera was named ``some_cam``, having the camera +poses and camera intrinsics info, respectively. It is suggested to +follow the example of ``heat_cam`` or ``sci_cam`` when creating +such a simulated camera. + +Then, whether the camera is real or simulated, a topic is expected +on which its images will be published, for example, named: + + /hw/cam_some + +which then need to be passed to the geometry and streaming mappers, +per the documentation for these tools. + +To visualize images published by your camera in ``rviz``, appropriate +entities must be added in ``iss.rviz``, etc. + +Uncompressed or compressed images are supported, but for the latter +adjustments must be made, mirroring ``sci_cam``. For example, the +image topic should be: + + /hw/cam_some/compressed + +except in ``iss.rviz``, where the suffix ``/compressed`` is not +needed, but instead the one sets ``Transport Hint: compressed``. + ## Compiling the software It is assumed that by now the Astrobee and ISAAC software is compiled. @@ -696,7 +750,7 @@ Ensure that the bot name is correct below. Set ``ASTROBEE_SOURCE_PATH``, --median_filters "7 0.1 25 0.1" \ --reliability_weight_exponent 2 \ --voxblox_integrator merged \ - --depth_hole_fill_diameter 30.0 \ + --depth_hole_fill_diameter 0 \ --max_ray_length 2.5 \ --voxel_size 0.01 \ --max_iso_times_exposure 5.1 \ @@ -747,17 +801,19 @@ Parameters: The default is 0. --foreshortening_delta: A smaller value here will result in holes in depth images being filled more aggressively but potentially - with more artifacts in foreshortened regions. + with more artifacts in foreshortened regions. Works only with + positive --depth_hole_fill_diameter. --median_filters: Given a list "w1 d1 w2 d2 ... ", remove a depth image point if it differs, in the Manhattan norm, from the median of cloud points in the pixel window of size wi centered at it by - more than di. 
This removes points sticking out for each such i. The - default is "7 0.1 25 0.1". + more than di. This removes points sticking out for each such + i. The default is "7 0.1 25 0.1". --depth_hole_fill_diameter: Fill holes in the depth point clouds - with this diameter, in pixels. This happens before the clouds - are fused. It is suggested to not make this too big, as more - hole-filling happens on the fused mesh later (see - --max_hole_diameter). The default is 30. + with this diameter, in pixels. This happens before the clouds are + fused. If set to a positive value it can fill really big holes but + may introduce artifacts. It is better to leave the hole-filling + for later, once the mesh is fused (see --max_hole_diameter). + The default is 0. --reliability_weight_exponent: A larger value will give more weight to depth points corresponding to pixels closer to depth image center, which are considered more reliable. The default is @@ -775,12 +831,16 @@ Parameters: to adjust for lightning differences, then apply the gamma transform back. This value should be set to the maximum observed ISO * exposure_time. The default is 5.1. Not used with simulated data. - --smoothing_time: A larger value will result in a smoother mesh. The default - is 0.00005. - --max_num_hole_edges: Close holes in the mesh which have no more than this - many edges. The default is 1000. - --max_hole_diameter: The diameter (in meters) of the largest hole in the - mesh to fill. The default is 0.3. + --smoothing_time: A larger value will result in a smoother mesh. The + default is 0.00005. + --no_boundary_erosion: Do not erode the boundary when smoothing + the mesh. Erosion may help with making the mesh more regular and + easier to hole-fill, but may be undesirable in regions which + don't get to be hole-filled. + --max_num_hole_edges: Close holes in the mesh which have no more + than this many edges. The default is 1000. 
+ --max_hole_diameter: The diameter (in meters) of the largest hole + in the mesh to fill. The default is 0.3. --num_min_faces_in_component: Keep only connected mesh components with at least this many faces. --num_components_to_keep: How many of the largest connected components @@ -977,7 +1037,7 @@ To run the streaming mapper with real data for the given bot, do: streaming_mapper:=true output:=screen Wait until it finishes forming the texture model, which may take -30 seconds on more. +30 seconds, or up to 3 minutes for very large meshes. Ensure that the ``ASTROBEE_ROBOT`` name is correct above. @@ -1017,7 +1077,8 @@ with the image sampler, or /hw/cam_nav if no image sampler was used. The rest of the cameras usually follow -the convention on the line above. +the convention on the line above, for example, the ``heat_cam`` topic +is ``/hw/cam_heat``. For the sci cam, the streaming mapper also expects image exposure data on the topic: @@ -1116,8 +1177,8 @@ Ensure the same environment as before is present, including the robot name, and run: roslaunch isaac isaac_astrobee.launch mlp:=local llp:=disabled \ - nodes:=framestore,localization_node,localization_manager \ - streaming_mapper:=true output:=screen + nodes:=framestore,localization_node streaming_mapper:=true \ + output:=screen Wait until the textured model is created, which can take a minute. diff --git a/dense_map/geometry_mapper/src/texture_processing.cc b/dense_map/geometry_mapper/src/texture_processing.cc index 595784ff..339f86e3 100644 --- a/dense_map/geometry_mapper/src/texture_processing.cc +++ b/dense_map/geometry_mapper/src/texture_processing.cc @@ -57,33 +57,36 @@ namespace dense_map { -const int NUM_CHANNELS = 4; // BRGA -const int TILE_PADDING = 4; +// Use int64_t values as much as possible when it comes to dealing with +// pixel indices, as for large images stored as a single array, an int +// (which is same as int32) index was shown to overflow. 
+const int64_t NUM_CHANNELS = 4; // BRGA +const int64_t TILE_PADDING = 4; -IsaacTextureAtlas::IsaacTextureAtlas(unsigned int width, unsigned int height) +IsaacTextureAtlas::IsaacTextureAtlas(int64_t width, int64_t height) : width(width), height(height), determined_height(0), padding(TILE_PADDING), finalized(false) { bin = RectangularBin::create(width, height); - image = mve::ByteImage::create(width, height, NUM_CHANNELS); + + // Do not create the image buffer yet, we don't need it until after its precise + // size is determined. } -/** - * Copies the src image into the dest image at the given position, - * optionally adding a border. - * @warning asserts that the given src image fits into the given dest image. - */ -void copy_into(mve::ByteImage::ConstPtr src, int x, int y, mve::ByteImage::Ptr dest, int border = 0) { - assert(x >= 0 && x + src->width() + 2 * border <= dest->width()); - assert(y >= 0 && y + src->height() + 2 * border <= dest->height()); +// Copies the src image into the dest image at the given position, +// optionally adding a border. +// It asserts that the given src image fits into the given dest image. 
+void copy_into(mve::ByteImage::ConstPtr src, int64_t x, int64_t y, mve::ByteImage::Ptr dest, int64_t border = 0) { + assert(x >= 0L && x + src->width() + 2 * border <= dest->width()); + assert(y >= 0L && y + src->height() + 2 * border <= dest->height()); assert(src->channels() == dest->channels()); - for (int i = 0; i < src->width() + 2 * border; ++i) { - for (int j = 0; j < src->height() + 2 * border; j++) { - int sx = i - border; - int sy = j - border; + for (int64_t i = 0; i < src->width() + 2 * border; i++) { + for (int64_t j = 0; j < src->height() + 2 * border; j++) { + int64_t sx = i - border; + int64_t sy = j - border; if (sx < 0 || sx >= src->width() || sy < 0 || sy >= src->height()) continue; - for (int c = 0; c < src->channels(); ++c) { + for (int64_t c = 0; c < src->channels(); ++c) { dest->at(x + i, y + j, c) = src->at(sx, sy, c); } } @@ -94,37 +97,41 @@ typedef std::vector> PixelVector; typedef std::set> PixelSet; bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { - if (finalized) throw util::Exception("No insertion possible, IsaacTextureAtlas already finalized"); + if (finalized) + throw util::Exception("No insertion possible, IsaacTextureAtlas already finalized"); assert(bin != NULL); - int const width = texture_patch->get_width() + 2 * padding; - int const height = texture_patch->get_height() + 2 * padding; - Rect rect(0, 0, width, height); + int64_t width = texture_patch->get_width() + 2 * padding; + int64_t height = texture_patch->get_height() + 2 * padding; + Rect rect = Rect(0, 0, width, height); if (!bin->insert(&rect)) return false; // Keep track how many rows of the allocated texture we actually use - determined_height = std::max((unsigned int)(rect.max_y), determined_height); + determined_height = std::max(rect.max_y, determined_height); // Note how we don't bother to do any copying, as at this stage we // only care for the structure // copy_into(patch_image, rect.min_x, rect.min_y, image, padding); - 
IsaacTexturePatch::Faces const& patch_faces = texture_patch->get_faces(); - IsaacTexturePatch::Texcoords const& patch_texcoords = texture_patch->get_texcoords(); + IsaacTexturePatch::Faces const& patch_faces = texture_patch->get_faces(); // alias + IsaacTexturePatch::Texcoords const& patch_texcoords = texture_patch->get_texcoords(); // alias - /* Calculate the offset of the texture patches' relative texture coordinates */ + // Calculate where the patch will go after insertion math::Vec2f offset = math::Vec2f(rect.min_x + padding, rect.min_y + padding); faces.insert(faces.end(), patch_faces.begin(), patch_faces.end()); - /* Calculate the final textcoords of the faces. */ - for (std::size_t i = 0; i < patch_faces.size(); ++i) { - for (int j = 0; j < 3; ++j) { + // Insert the texcoords in the right place + for (std::size_t i = 0; i < patch_faces.size(); i++) { + for (int64_t j = 0; j < 3; j++) { math::Vec2f rel_texcoord(patch_texcoords[i * 3 + j]); + math::Vec2f texcoord = rel_texcoord + offset; - // Delay this until later + // Delay this normalization until we know the final width and + // height of the atlas. + // texcoord[0] = texcoord[0] / this->width; // texcoord[1] = texcoord[1] / this->height; @@ -161,16 +168,17 @@ void IsaacTextureAtlas::merge_texcoords() { } } -// Set the final height and scale the texcoords by removing -// redundant space +// Set the final height by removing unused space. Allocate the image buffer. void IsaacTextureAtlas::resize_atlas() { // Round up a little to make it a multiple of 2 * padding. Not strictly necessary // but looks nicer that way. 
- this->determined_height = (2 * padding) * (ceil(this->determined_height / static_cast(2 * padding))); - this->determined_height = std::min(this->height, this->determined_height); + this->determined_height + = (2 * padding) * (ceil(this->determined_height / static_cast(2 * padding))); + this->determined_height = std::min(this->height, this->determined_height); this->height = this->determined_height; - this->image->resize(this->width, this->height, image->channels()); + + this->image = mve::ByteImage::create(this->width, this->height, NUM_CHANNELS); } // Scale the texcoords once the final atlas dimensions are known @@ -190,7 +198,8 @@ void IsaacTextureAtlas::finalize() { this->finalized = true; } -void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vector const& texture_atlases, +void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, + std::vector const& texture_atlases, ObjModel* obj_model) { mve::TriangleMesh::VertexList const& mesh_vertices = mesh->get_vertices(); mve::TriangleMesh::NormalList const& mesh_normals = mesh->get_vertex_normals(); @@ -224,7 +233,7 @@ void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vector& mesh_info, std::shared_ptr& graph, + std::shared_ptr& mesh_info, + std::shared_ptr& graph, std::shared_ptr& bvh_tree) { - std::cout << "Load and prepare mesh from " << mesh_file << std::endl; + std::cout << "Loading mesh: " << mesh_file << std::endl; mesh = mve::geom::load_ply_mesh(mesh_file); mesh_info = std::shared_ptr(new mve::MeshInfo(mesh)); tex::prepare_mesh(mesh_info.get(), mesh); - std::cout << "Building adjacency graph: " << std::endl; std::size_t const num_faces = mesh->get_faces().size() / 3; if (num_faces > std::numeric_limits::max()) throw std::runtime_error("Exeeded maximal number of faces"); @@ -266,7 +275,7 @@ void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mes util::WallTimer timer; std::vector const& faces = mesh->get_faces(); std::vector const& vertices = mesh->get_vertices(); 
- std::cout << "Building BVH tree from " << faces.size() / 3 << " faces.\n"; + std::cout << "Number of faces: " << faces.size() / 3 << " faces.\n"; bvh_tree = std::shared_ptr(new BVHTree(faces, vertices)); std::cout << "Building the tree took: " << timer.get_elapsed() / 1000.0 << " seconds\n"; } @@ -283,17 +292,16 @@ math::Vec3f eigen_to_vec3f(Eigen::Vector3d const& V) { return v; } -void calculate_texture_size(double height_factor, - std::list const& texture_patches, - int& texture_width, int& texture_height) { - int64_t total_area = 0; - int max_width = 0; - int max_height = 0; - int padding = TILE_PADDING; +void calculate_texture_size(double height_factor, std::list const& texture_patches, + int64_t& texture_width, int64_t& texture_height) { + int64_t total_area = 0; // this can be huge and may overflow with 32 bit int + int64_t max_width = 0; + int64_t max_height = 0; + int64_t padding = TILE_PADDING; for (IsaacTexturePatch::ConstPtr texture_patch : texture_patches) { - int width = texture_patch->get_width() + 2 * padding; - int height = texture_patch->get_height() + 2 * padding; + int64_t width = texture_patch->get_width() + 2 * padding; + int64_t height = texture_patch->get_height() + 2 * padding; max_width = std::max(max_width, width); max_height = std::max(max_height, height); @@ -302,7 +310,7 @@ void calculate_texture_size(double height_factor, total_area += area; } - texture_width = std::max(4 * 1024, max_width); + texture_width = std::max(4L * 1024L, max_width); texture_height = ceil(static_cast(total_area / texture_width)); texture_height = std::max(texture_height, max_height); @@ -317,6 +325,7 @@ bool texture_patch_compare(IsaacTexturePatch::ConstPtr first, IsaacTexturePatch: // If there is more than one texture atlas merge them, as it is // difficult to later manage multiple texture images. // TODO(oalexan1): This does not work, despite trying to debug it a lot. +// TODO(oalexan1): Maybe it is because texcoord_ids are not copied! 
void merge_texture_atlases(std::vector* texture_atlases) { if (texture_atlases->size() <= 1) return; @@ -330,7 +339,7 @@ void merge_texture_atlases(std::vector* texture_atlases) } // Find the final width and height - int final_height = 0, final_width = prev_atlases[0]->get_width(); + int64_t final_height = 0, final_width = prev_atlases[0]->get_width(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { final_height += prev_atlases[atlas_it]->get_height(); } @@ -339,12 +348,12 @@ void merge_texture_atlases(std::vector* texture_atlases) // but no texcoords exist yet texture_atlases->push_back(IsaacTextureAtlas::create(final_width, final_height)); - // Copy the image data - int image_start = 0; + // Copy the image data. Need to use 'int64_t' values as an int may overflow. + int64_t image_start = 0; mve::ByteImage::Ptr& dest_image = (*texture_atlases)[0]->get_image(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { mve::ByteImage::Ptr& prev_image = prev_atlases[atlas_it]->get_image(); - int prev_image_len = prev_image->width() * prev_image->height() * prev_image->channels(); + int64_t prev_image_len = prev_image->width() * prev_image->height() * prev_image->channels(); std::memcpy(dest_image->begin() + image_start, prev_image->begin(), prev_image_len); // prepare for the next iteration @@ -352,7 +361,7 @@ void merge_texture_atlases(std::vector* texture_atlases) } // Copy the texcoords - int texcoord_start = 0; + int64_t texcoord_start = 0; IsaacTextureAtlas::Texcoords& dest_coords = (*texture_atlases)[0]->get_texcoords(); dest_coords.clear(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { @@ -361,7 +370,7 @@ void merge_texture_atlases(std::vector* texture_atlases) IsaacTextureAtlas::Faces& prev_faces = prev_atlases[atlas_it]->get_faces(); for (size_t face_it = 0; face_it < prev_faces.size(); face_it++) { - for (int j = 0; j < 3; ++j) { + for (int64_t j = 0; j < 3; j++) { math::Vec2f 
rel_texcoord(prev_coords[face_it * 3 + j]); math::Vec2f texcoord = rel_texcoord + offset; dest_coords.push_back(texcoord); @@ -385,36 +394,39 @@ void merge_texture_atlases(std::vector* texture_atlases) prev_atlases.clear(); // wipe this } -void generate_texture_atlases(double height_factor, std::vector& texture_patches, - std::map& face_positions, std::vector* texture_atlases, +void generate_texture_atlases(double height_factor, + std::vector& texture_patches, + std::map& face_positions, + std::vector* texture_atlases, std::vector>& atlas_sizes) { texture_atlases->clear(); atlas_sizes.clear(); // Make a copy of the pointers to texture patches that we will - // modify Use a list from which it is easier to erase things. + // modify. Use a list from which it is easier to erase things. std::list local_texture_patches; - for (size_t it = 0; it < texture_patches.size(); it++) local_texture_patches.push_back(texture_patches[it]); + for (size_t it = 0; it < texture_patches.size(); it++) + local_texture_patches.push_back(texture_patches[it]); /* Improve the bin-packing algorithm efficiency by sorting texture patches * in descending order of size. 
*/ local_texture_patches.sort(texture_patch_compare); // Find the face positions after sorting - int count = 0; + int64_t count = 0; for (auto it = local_texture_patches.begin(); it != local_texture_patches.end(); it++) { IsaacTexturePatch::ConstPtr ptr = *it; IsaacTexturePatch const* P = ptr.get(); - int label = P->get_label(); + int64_t label = P->get_label(); face_positions[label] = count; count++; } - std::size_t const total_num_patches = local_texture_patches.size(); - std::size_t remaining_patches = local_texture_patches.size(); + int64_t num_patches = local_texture_patches.size(); + count = 0; while (!local_texture_patches.empty()) { - int texture_width, texture_height; + int64_t texture_width = 0, texture_height = 0; calculate_texture_size(height_factor, local_texture_patches, texture_width, texture_height); texture_atlases->push_back(IsaacTextureAtlas::create(texture_width, texture_height)); @@ -423,16 +435,15 @@ void generate_texture_atlases(double height_factor, std::vector::iterator it = local_texture_patches.begin(); for (; it != local_texture_patches.end();) { - std::size_t done_patches = total_num_patches - remaining_patches; - int precent = static_cast(done_patches) / total_num_patches * 100.0f; - if (total_num_patches > 100 && done_patches % (total_num_patches / 100) == 0) { - } - if (texture_atlas->insert(*it)) { it = local_texture_patches.erase(it); - remaining_patches -= 1; + count++; + + if (count % 5000 == 0 || count == num_patches) { + std::cout << "Adding patches: " << count << "/" << num_patches << std::endl; + } } else { - ++it; + it++; } } @@ -447,15 +458,15 @@ void generate_texture_atlases(double height_factor, std::vectormerge_texcoords(); (*texture_atlases)[atlas_it]->scale_texcoords(); - atlas_sizes.push_back( - std::make_pair((*texture_atlases)[atlas_it]->get_width(), (*texture_atlases)[atlas_it]->get_height())); + atlas_sizes.push_back(std::make_pair((*texture_atlases)[atlas_it]->get_width(), + 
(*texture_atlases)[atlas_it]->get_height())); } return; } void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { - int OBJ_INDEX_OFFSET = 1; + int64_t OBJ_INDEX_OFFSET = 1; ObjModel::Vertices const& vertices = obj_model->get_vertices(); ObjModel::Normals const& normals = obj_model->get_normals(); @@ -472,21 +483,21 @@ void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { out << "mtllib " << name << ".mtl" << '\n'; out << std::fixed << std::setprecision(8); - for (std::size_t i = 0; i < vertices.size(); ++i) { + for (std::size_t i = 0; i < vertices.size(); i++) { out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << '\n'; } - for (std::size_t i = 0; i < texcoords.size(); ++i) { + for (std::size_t i = 0; i < texcoords.size(); i++) { out << "vt " << texcoords[i][0] << " " << 1.0f - texcoords[i][1] << '\n'; } - for (std::size_t i = 0; i < normals.size(); ++i) { + for (std::size_t i = 0; i < normals.size(); i++) { out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << '\n'; } - for (std::size_t i = 0; i < groups.size(); ++i) { + for (std::size_t i = 0; i < groups.size(); i++) { out << "usemtl " << groups[i].material_name << '\n'; - for (std::size_t j = 0; j < groups[i].faces.size(); ++j) { + for (std::size_t j = 0; j < groups[i].faces.size(); j++) { ObjModel::Face const& face = groups[i].faces[j]; out << "f"; for (std::size_t k = 0; k < 3; ++k) { @@ -573,12 +584,13 @@ struct Edge { // Move an edge this far along the edge normal Edge bias_edge(Edge const& e, double bias) { pointPair n = e.get_normal(); - return Edge(e.x0 + bias * n.first, e.y0 + bias * n.second, e.x1 + bias * n.first, e.y1 + bias * n.second); + return Edge(e.x0 + bias * n.first, e.y0 + bias * n.second, + e.x1 + bias * n.first, e.y1 + bias * n.second); } bool edge_intersection(Edge A, Edge B, pointPair& out) { - return edge_intersection(std::make_pair(A.x0, A.y0), std::make_pair(A.x1, A.y1), std::make_pair(B.x0, 
B.y0), - std::make_pair(B.x1, B.y1), out); + return edge_intersection(std::make_pair(A.x0, A.y0), std::make_pair(A.x1, A.y1), + std::make_pair(B.x0, B.y0), std::make_pair(B.x1, B.y1), out); } // A triangle with three edges @@ -633,22 +645,22 @@ Triangle bias_triangle(Triangle const& tri, double bias) { ans2 = edge_intersection(biased_edges[1], biased_edges[2], E2); // Apply the bias only for non-degenerate triangles - if (ans0 && ans1 && ans2) return Triangle(E0.first, E0.second, E1.first, E1.second, E2.first, E2.second); + if (ans0 && ans1 && ans2) + return Triangle(E0.first, E0.second, E1.first, E1.second, E2.first, E2.second); return tri; } -// Given a mesh and a given pixel size in meters (say 0.001), -// overlay a grid of that pixel size on top of each triangle and -// form a textured 3D model with a pixel at each node. -// Store the transforms that allows one at any time to -// project an image onto that triangle and copy the image -// interpolated at the grid points into the texture. -// This way the size of the texture image is fixed -// for the given texture mesh and different images result -// in different pixels in this texture. - -void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_threads, +// Given a mesh and a given pixel size in meters (say 0.001), overlay +// a grid of that pixel size on top of each triangle and form a +// textured 3D model with a pixel at each grid point. Store the +// transforms that allows one at any time to project an image onto +// that triangle and copy the image interpolated at the grid points +// into the texture buffer. This way the size of the texture buffer +// depends only on the mesh geometry and not on the number, orientation, +// or resolution of the images projected on the mesh. 
+ +void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads, // outputs std::vector& face_projection_info, std::vector& texture_atlases, tex::Model& model) { @@ -661,44 +673,68 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; + if (vertices.size() != mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; std::vector const& faces = mesh->get_faces(); std::vector const& face_normals = mesh->get_face_normals(); - int num_faces = faces.size() / 3; + // Initialize face_projection_info. Likely the constructor will be + // called either way but it is safer this way. + int64_t num_faces = faces.size() / 3; face_projection_info.resize(num_faces); + for (int64_t it = 0; it < num_faces; it++) face_projection_info[it] = FaceInfo(); - // Store here texture coords before they get packed in the model + // Store here texture coords before they get packed in the model. + // Ensure that this is initialized. 
ObjModel::TexCoords input_texcoords(3 * num_faces); + for (size_t it = 0; it < input_texcoords.size(); it++) input_texcoords[it] = math::Vec2f(0.0f, 0.0f); std::vector texture_patches(num_faces); + double total_area = 0.0; #pragma omp parallel for - for (int face_id = 0; face_id < num_faces; face_id++) { + for (int64_t face_id = 0; face_id < num_faces; face_id++) { math::Vec3f const& v1 = vertices[faces[3 * face_id + 0]]; math::Vec3f const& v2 = vertices[faces[3 * face_id + 1]]; math::Vec3f const& v3 = vertices[faces[3 * face_id + 2]]; // math::Vec3f const & face_normal = face_normals[face_id]; - // A mesh triangle is visible if rays from its vertices do not - // intersect the mesh somewhere else before hitting the camera, - // and hit the camera inside the image bounds. + // The mesh triangle math::Vec3f const* samples[] = {&v1, &v2, &v3}; std::vector patch_faces(1); patch_faces[0] = face_id; + std::vector face_texcoords(3); // Convert the triangle corners to Eigen so that we have double precision std::vector V(3); for (size_t v_it = 0; v_it < 3; v_it++) V[v_it] = vec3f_to_eigen(*samples[v_it]); + // The area of the given face + double area = 0.5 * ((V[1] - V[0]).cross(V[2] - V[0])).norm(); + total_area += area; + +#if 0 + // TODO(oalexan1): Sort this out + if (area == 0.0) { + std::cout << "Bad vertex " << V[0].transpose() << std::endl; + // Ignore degenerate faces + face_projection_info[face_id].valid = false; + int64_t width = 0, height = 0; // later such patches will be ignored + for (size_t v_it = 0; v_it < 3; v_it++) + face_texcoords[v_it] = math::Vec2f(0.0f, 0.0f); // ensure this is initialized + texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords, + width, height); + continue; + } +#endif + // Compute the double precision normal, as we will do a lot of // careful math. It agrees with the pre-existing float normal. 
Eigen::Vector3d N = (V[1] - V[0]).cross(V[2] - V[0]); if (N != Eigen::Vector3d(0, 0, 0)) N.normalize(); - // Eigen::Vector3d N0 = vec3f_to_eigen(face_normal); // pre-existing - // normal + // Eigen::Vector3d N0 = vec3f_to_eigen(face_normal); // pre-existing normal double a, e; dense_map::normalToAzimuthAndElevation(N, a, e); @@ -714,8 +750,8 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre YZPlaneToTriangleFace.translation() = Eigen::Vector3d(0, 0, 0); Eigen::Affine3d inv_trans = YZPlaneToTriangleFace.inverse(); - // Transform the vertices, they will end up in the same plane - // with x constant. + // Transform the vertices, they will end up in the same plane with + // x constant. This will make it easy to sample the face. std::vector TransformedVertices; TransformedVertices.resize(3); double x = 0, min_y = 0, max_y = 0, min_z = 0, max_z = 0; @@ -736,26 +772,19 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre } // Put a little padding just in case - // TODO(oalexan1): Put back a padding of 2 maybe! 
- int padding = 1; + int64_t padding = 1; min_y -= padding * pixel_size; max_y += padding * pixel_size; min_z -= padding * pixel_size; max_z += padding * pixel_size; - int width = ceil((max_y - min_y) / pixel_size) + 1; - int height = ceil((max_z - min_z) / pixel_size) + 1; - - // std::cout << "width and height are " << width << ' ' << height << - // std::endl; - - std::vector face_texcoords(3); + int64_t width = ceil((max_y - min_y) / pixel_size) + 1; + int64_t height = ceil((max_z - min_z) / pixel_size) + 1; // Record where the triangle vertices will be in the sampled bounding box of the face - for (size_t v_it = 0; v_it < 3; v_it++) { + for (size_t v_it = 0; v_it < 3; v_it++) face_texcoords[v_it] = math::Vec2f((TransformedVertices[v_it][1] - min_y) / pixel_size, (TransformedVertices[v_it][2] - min_z) / pixel_size); - } // Append these to the bigger list for (size_t v_it = 0; v_it < face_texcoords.size(); v_it++) { @@ -770,15 +799,17 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre F.min_z = min_z; F.width = width; F.height = height; - F.TransformedVertices = TransformedVertices; + F.TransformedVertices = TransformedVertices; F.YZPlaneToTriangleFace = YZPlaneToTriangleFace; F.padding = padding; - // We won't really use this image, but need its dimensions - texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords, width, height); + // Form a little rectangle, which later will be inserted in the right place + // in the texture atlas + texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords, + width, height); } // End loop over mesh faces - // for face i, 3 * face_position[i] will be where it is starts being stored + // For face i, 3 * face_position[i] will be where it is starts being stored // in the uv array. 
std::map face_positions; std::vector> atlas_sizes; @@ -789,7 +820,8 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre // allocated for the atlas. Normally we will get it right from the first // try though. for (int attempt = 0; attempt < 10; attempt++) { - generate_texture_atlases(height_factor, texture_patches, face_positions, &texture_atlases, atlas_sizes); + generate_texture_atlases(height_factor, texture_patches, face_positions, + &texture_atlases, atlas_sizes); if (texture_atlases.size() <= 1) break; @@ -799,12 +831,13 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre } // If still failed, give up - if (texture_atlases.size() > 1) LOG(FATAL) << "More than one texture atlas was generated, which is not supported."; + if (texture_atlases.size() > 1) + LOG(FATAL) << "More than one texture atlas was generated, which is not supported."; if (texture_atlases.empty()) LOG(FATAL) << "No texture atlas was created."; - int texture_width = atlas_sizes[0].first; - int texture_height = atlas_sizes[0].second; + int64_t texture_width = atlas_sizes[0].first; + int64_t texture_height = atlas_sizes[0].second; // Build the model from atlases isaac_build_model(mesh, texture_atlases, &model); @@ -815,22 +848,20 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre // The two vectors below would be equal if we process the mesh fully, // and not so unless we stop early. 
- if (model_texcoords.size() != input_texcoords.size()) LOG(FATAL) << "Book-keeping failure regarding texcoords."; + if (model_texcoords.size() != input_texcoords.size()) + LOG(FATAL) << "Book-keeping failure regarding texcoords."; for (std::size_t i = 0; i < model_texcoords.size() / 3; i++) { auto face_pos = face_positions.find(i); - if (face_pos == face_positions.end()) { - LOG(FATAL) << "Cannot find position for index " << i; - } - - int mapped_i = face_pos->second; + if (face_pos == face_positions.end()) LOG(FATAL) << "Cannot find position for index " << i; + int64_t mapped_i = face_pos->second; // By comparing where the texcoords were before being put in the // textured mesh file and where it ends up in that file, we can // determine the shift that was used to place a texture patch in // the texture. - int shift_u = round(model_texcoords[3 * mapped_i][0] * texture_width - input_texcoords[3 * i][0]); - int shift_v = round(model_texcoords[3 * mapped_i][1] * texture_height - input_texcoords[3 * i][1]); + int64_t shift_u = round(model_texcoords[3 * mapped_i][0] * texture_width - input_texcoords[3 * i][0]); + int64_t shift_v = round(model_texcoords[3 * mapped_i][1] * texture_height - input_texcoords[3 * i][1]); face_projection_info[i].shift_u = shift_u; face_projection_info[i].shift_v = shift_v; @@ -858,13 +889,14 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; + if (vertices.size() != mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; // The uv_map assigns to each of the mesh vertices that is visible // in the texture its (u, v) pair. Find the map from each vertex // index to the index in the list of (u, v) pairs. 
std::map vertex_to_uv; - int count = 0; + int64_t count = 0; for (auto it = uv_map.begin(); it != uv_map.end(); it++) { vertex_to_uv[it->first] = count; count++; @@ -884,14 +916,16 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector b // image resolution. We will use the calibrated dimensions when // normalizing u and v, because when projecting 3D points in the // camera we will the calibrated camera model. - int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; - int calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; + int64_t raw_image_cols = image.cols; + int64_t raw_image_rows = image.rows; + int64_t calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; + int64_t calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; - int factor = raw_image_cols / calib_image_cols; + int64_t factor = raw_image_cols / calib_image_cols; if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { @@ -1147,12 +1182,10 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Project texture using a texture model that was already pre-filled, so // just update pixel values -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - cv::Mat const& image, camera::CameraModel const& cam, - std::vector& smallest_cost_per_face, double pixel_size, - int num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, - cv::Mat& out_texture) { +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, + camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, + int64_t num_threads, std::vector const& face_projection_info, + std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture) { omp_set_dynamic(0); // Explicitly disable dynamic teams 
omp_set_num_threads(num_threads); // Use this many threads for all // consecutive parallel regions @@ -1164,12 +1197,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // image resolution. We will use the calibrated dimensions when // normalizing u and v, because when projecting 3D points in the // camera we will the calibrated camera model. - int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; - int calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; + int64_t raw_image_cols = image.cols; + int64_t raw_image_rows = image.rows; + int64_t calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; + int64_t calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; - int factor = raw_image_cols / calib_image_cols; + int64_t factor = raw_image_cols / calib_image_cols; if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { LOG(FATAL) << "Published image width and height are: " @@ -1324,17 +1357,18 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // the triangle are sampled well. 
tri = bias_triangle(tri, 1.5 * pixel_size * F.padding); - for (int iz = 0; iz < F.height; iz++) { - double out_y0, out_y1; + for (int64_t iz = 0; iz < F.height; iz++) { + double out_y0 = -1.0, out_y1 = -1.0; double z = F.min_z + iz * pixel_size; bool success = tri.horizontal_line_intersect(z, out_y0, out_y1); if (!success) continue; - int min_iy = std::max(static_cast(floor((out_y0 - F.min_y) / pixel_size)), 0); - int max_iy = std::min(static_cast(ceil((out_y1 - F.min_y) / pixel_size)), F.width - 1); + int64_t min_iy = std::max(static_cast(floor((out_y0 - F.min_y) / pixel_size)), 0L); + int64_t max_iy = + std::min(static_cast(ceil((out_y1 - F.min_y) / pixel_size)), static_cast(F.width) - 1L); - for (int iy = min_iy; iy <= max_iy; iy++) { + for (int64_t iy = min_iy; iy <= max_iy; iy++) { // The point in the plane in which we do the book-keeping // Eigen::Vector3d P(F.x, F.min_y + iy * pixel_size, F.min_z + iz * // pixel_size); @@ -1350,7 +1384,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b if (cam_pt.z() <= 0) continue; // Get the undistorted pixel - Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d undist_centered_pix + = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); if (std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] || std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) { // If we are out of acceptable undistorted region, there's some uncertainty whether @@ -1361,11 +1396,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Get the distorted pixel value Eigen::Vector2d dist_pix; - cam.GetParameters().Convert(undist_centered_pix, &dist_pix); + cam.GetParameters().Convert + (undist_centered_pix, &dist_pix); // Skip pixels that don't project in the image, and potentially nan pixels. 
- bool is_good = (dist_pix.x() >= 0 && dist_pix.x() < calib_image_cols - 1 && dist_pix.y() >= 0 && - dist_pix.y() < calib_image_rows - 1); + bool is_good = (dist_pix.x() >= 0 && dist_pix.x() < calib_image_cols - 1 && + dist_pix.y() >= 0 && dist_pix.y() < calib_image_rows - 1); if (!is_good) continue; // Find the pixel value using bilinear interpolation. Note @@ -1380,11 +1416,11 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b cv::getRectSubPix(image, s, pix, interp_pix_val); cv::Vec3b color = interp_pix_val.at(0, 0); - // Find the location where to put the pixel - int offset = texture->channels() * ((F.shift_u + iy) + (F.shift_v + iz) * texture->width()); + // Find the location where to put the pixel. Use a int64_t as an int may overflow. + int64_t offset = texture->channels() * ((F.shift_u + iy) + (F.shift_v + iz) * texture->width()); // Copy the color - for (int channel = 0; channel < NUM_CHANNELS - 1; channel++) texture_ptr[offset + channel] = color[channel]; + for (int64_t channel = 0; channel < NUM_CHANNELS - 1; channel++) texture_ptr[offset + channel] = color[channel]; // Make it non-transparent texture_ptr[offset + NUM_CHANNELS - 1] = 255; @@ -1398,7 +1434,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Sanity check, to ensure nothing got mixed up with the multiple // threads - if (cost_val >= smallest_cost_per_face[face_id]) LOG(FATAL) << "Book-keeping error in estimating cost per face."; + if (cost_val >= smallest_cost_per_face[face_id]) + LOG(FATAL) << "Book-keeping error in estimating cost per face."; smallest_cost_per_face[face_id] = cost_val; } @@ -1413,13 +1450,9 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Project and save a mesh as an obj file to out_prefix.obj, // out_prefix.mtl, out_prefix.png. 
-void meshProject(mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - cv::Mat const& image, - Eigen::Affine3d const& world_to_cam, - camera::CameraParameters const& cam_params, - int num_exclude_boundary_pixels, - std::string const& out_prefix) { +void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, cv::Mat const& image, + Eigen::Affine3d const& world_to_cam, camera::CameraParameters const& cam_params, + int64_t num_exclude_boundary_pixels, std::string const& out_prefix) { // Create the output directory, if needed std::string out_dir = boost::filesystem::path(out_prefix).parent_path().string(); if (out_dir != "") dense_map::createDir(out_dir); @@ -1428,7 +1461,7 @@ void meshProject(mve::TriangleMesh::Ptr const& mesh, std::map uv_map; std::vector const& faces = mesh->get_faces(); - int num_faces = faces.size(); + int64_t num_faces = faces.size(); std::vector smallest_cost_per_face(num_faces, 1.0e+100); camera::CameraModel cam(world_to_cam, cam_params); diff --git a/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc b/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc index 7b15efbc..5eb69ca8 100644 --- a/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc +++ b/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc @@ -117,9 +117,32 @@ int main(int argc, char** argv) { std::string topic = m.getTopic(); if (topic == FLAGS_sci_cam_topic) continue; - double timestamp = m.getTime().toSec(); - if (beg_time < 0) beg_time = timestamp; - end_time = timestamp; + // If the current message is an image or a point cloud, get its + // header timestamp. That is a more reliable time than + // m.getTime(), which is the time the message got recorded, not + // when it got made. 
+ double timestamp = -1.0; + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + timestamp = image_msg->header.stamp.toSec(); + } else { + sensor_msgs::CompressedImage::ConstPtr comp_image_msg = + m.instantiate(); + if (comp_image_msg) { + timestamp = comp_image_msg->header.stamp.toSec(); + } else { + sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); + if (pc_msg) { + timestamp = pc_msg->header.stamp.toSec(); + } + } + } + + if (timestamp > 0.0) { + // Record beg and end time from the headers + if (beg_time < 0) beg_time = timestamp; + end_time = timestamp; + } output_bag.write(m.getTopic(), m.getTime(), m); } diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index ece8e7f0..d5e5dc2c 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -847,7 +847,7 @@ void calc_median_residuals(std::vector const& residuals, if (len == 0) std::cout << name << ": " << "none"; else - std::cout << std::setprecision(8) + std::cout << std::setprecision(5) << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3] << ' ' << vals[it4]; std::cout << " (" << len << " residuals)" << std::endl; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 99e0b467..33bba1cd 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -592,8 +592,8 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero, int w // ray from the existing point to the new point being almost parallel // to existing nearby rays (emanating from camera center) which is not // good. 
-void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, int radius2, - double foreshortening_delta) { +void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, + int radius2, double foreshortening_delta) { // Copy depthMat to workMat, with the latter unchanged below depthMat.copyTo(workMat); @@ -1416,16 +1416,18 @@ DEFINE_int32(depth_exclude_rows, 0, "at margins to avoid some distortion of that data."); DEFINE_double(foreshortening_delta, 5.0, "A smaller value here will result in holes in depth images being filled more " - "aggressively but potentially with more artifacts in foreshortened regions."); + "aggressively but potentially with more artifacts in foreshortened regions." + "Works only with positive --depth_hole_fill_diameter."); DEFINE_string(median_filters, "7 0.1 25 0.1", "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " "if it differs, in the Manhattan norm, from the median of cloud points " "in the pixel window of size wi centered at it by more than di. This " "removes points sticking out for each such i."); -DEFINE_double(depth_hole_fill_diameter, 30.0, - "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " - "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " - "later (--max_hole_diameter)."); +DEFINE_double(depth_hole_fill_diameter, 0.0, + "Fill holes in the depth point clouds with this diameter, in pixels. This happens " + "before the clouds are fused. If set to a positive value it can fill really big " + "holes but may introduce artifacts. 
It is better to leave the hole-filling for " + "later, once the mesh is fused (see --max_hole_diameter)."); DEFINE_double(reliability_weight_exponent, 2.0, "A larger value will give more weight to depth points corresponding to " "pixels closer to depth image center, which are considered more reliable."); diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index fc265d12..dfbb7a44 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -80,7 +80,7 @@ def process_args(args): parser.add_argument( "--undistorted_crop_wins", dest="undistorted_crop_wins", - default="sci_cam,1250,1000 nav_cam,1100,776 haz_cam,210,160", + default="sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200", help="The central region to keep after undistorting an image and " + "before texturing. For sci_cam the numbers are at 1/4th of the full " + "resolution (resolution of calibration) and will be adjusted for the " @@ -153,7 +153,7 @@ def process_args(args): default="5.0", help="A smaller value here will result in holes in depth images " + "being filled more aggressively but potentially with more artifacts " - + "in foreshortened regions.", + + "in foreshortened regions. Works only with positive --depth_hole_fill_diameter.", ) parser.add_argument( "--median_filters", @@ -167,9 +167,12 @@ def process_args(args): parser.add_argument( "--depth_hole_fill_diameter", dest="depth_hole_fill_diameter", - default="30", - help="Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).", - ) + default="0", + help="Fill holes in the depth point clouds with this diameter, in pixels. " + + "This happens before the clouds are fused. 
If set to a positive value it " + + "can fill really big holes but may introduce artifacts. It is better to " + + "leave the hole-filling for later, once the mesh is fused (see " + + "--max_hole_diameter).") parser.add_argument( "--reliability_weight_exponent", dest="reliability_weight_exponent", @@ -213,6 +216,16 @@ def process_args(args): default="0.00005", help="A larger value will result in a smoother mesh.", ) + + parser.add_argument( + "--no_boundary_erosion", + dest="no_boundary_erosion", + action="store_true", + help="Do not erode the boundary when smoothing the mesh. Erosion may help with " + + "making the mesh more regular and easier to hole-fill, but may be undesirable " + + "in regions which don't get to be hole-filled.", + ) + parser.add_argument( "--max_num_hole_edges", dest="max_num_hole_edges", @@ -567,7 +580,11 @@ def smoothe_mesh(input_mesh, output_mesh, args, attempt): raise Exception("Cannot find the smoothing tool:" + smoothe_mesh_path) num_iterations = "1" - smoothe_boundary = "1" + if args.no_boundary_erosion: + smoothe_boundary = "0" + else: + smoothe_boundary = "1" + cmd = [ smoothe_mesh_path, num_iterations, diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index 6026d5a6..82ddfe1e 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ b/dense_map/geometry_mapper/tools/image_picker.cc @@ -295,13 +295,13 @@ int main(int argc, char** argv) { // Write the images to disk std::cout << "Writing the images to: " << FLAGS_output_nav_cam_dir << std::endl; + int bag_pos = 0; bool save_grayscale = true; // For feature matching need grayscale for (size_t nav_it = 0; nav_it < nav_cam_timestamps.size(); nav_it++) { - double found_time = -1.0; // won't be used, but expected by the api - int bag_pos = 0; // reset this each time + double found_time = -1.0; cv::Mat image; - if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, - save_grayscale, image, bag_pos, 
+ if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, save_grayscale, image, + bag_pos, // will change each time found_time)) LOG(FATAL) << "Could not find image at the desired time."; @@ -309,6 +309,7 @@ int main(int argc, char** argv) { snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", FLAGS_output_nav_cam_dir.c_str(), nav_cam_timestamps[nav_it]); + std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, image); } return 0; diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index 068eab5e..6f9b770c 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -285,7 +285,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { } else { ROS_INFO_STREAM("Subscribing to uncompressed image topic: " << texture_cam_topic); uncompressed_texture_image_sub = - image_transport->subscribe(texture_cam_topic, 2, &StreamingMapper::UncompressedTextureCallback, this); + image_transport->subscribe(texture_cam_topic, 2, + &StreamingMapper::UncompressedTextureCallback, this); } // Load the mesh @@ -389,9 +390,11 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, camera::CameraModel const& cam, std::vector& smallest_cost_per_face, std::string const& out_prefix) { - omp_set_dynamic(0); // Explicitly disable dynamic teams - omp_set_num_threads(num_threads); // Use this many threads for all consecutive - // parallel regions + // Explicitly disable dynamic determination of number of threads + omp_set_dynamic(0); + + // Use this many threads for all consecutive parallel regions + omp_set_num_threads(num_threads); // Colorize the image if grayscale. Careful here to avoid making a // copy if we don't need one. 
Keep img_ptr pointing at the final @@ -421,9 +424,6 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, img_ptr = &scaled_image; } - // std::cout << "Exposure correction took " << timer1.get_elapsed()/1000.0 << - // " seconds\n"; - // Prepare the image for publishing sensor_msgs::ImagePtr msg; std::vector face_vec; @@ -436,8 +436,7 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", *img_ptr).toImageMsg(); } else { - dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, - pixel_size, num_threads, + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, pixel_size, num_threads, face_projection_info, texture_atlases, texture_model, model_texture); // Note that the output image has an alpha channel msg = cv_bridge::CvImage(std_msgs::Header(), "bgra8", model_texture).toImageMsg(); @@ -572,8 +571,10 @@ void StreamingMapper::TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::C // std::cout << "Received the camera info at time: " << info->header.stamp.toSec() << "\n"; m_texture_cam_params = - camera::CameraParameters(Eigen::Vector2i(image_width, image_height), Eigen::Vector2d(focal_length, focal_length), - Eigen::Vector2d(optical_center_x, optical_center_y), distortion); + camera::CameraParameters(Eigen::Vector2i(image_width, image_height), + Eigen::Vector2d(focal_length, focal_length), + Eigen::Vector2d(optical_center_x, optical_center_y), + distortion); } } @@ -694,7 +695,8 @@ void StreamingMapper::WipeOldImages() { num_to_keep = 25; // something in the middle } - while (texture_cam_images.size() > num_to_keep) texture_cam_images.erase(texture_cam_images.begin()); + while (texture_cam_images.size() > num_to_keep) + texture_cam_images.erase(texture_cam_images.begin()); // There is a second wiping in this code in a different place. 
After // an image is processed it will be wiped together with any images @@ -854,6 +856,7 @@ void StreamingMapper::ProcessingLoop() { continue; } + util::WallTimer timer; // All is good with this timestamp. We may still not want to process it, @@ -887,13 +890,12 @@ void StreamingMapper::ProcessingLoop() { curr_dist_bw_cams < m_dist_between_processed_cams && angle_diff < m_angle_between_processed_cams); if (skip_processing) { - // std::cout << "Skipping this camera." << std::endl; continue; } // May need to keep this comment long term // std::cout << "Processing the streaming camera image at time stamp " - // << texture_cam_image_timestamp << std::endl; + // << texture_cam_image_timestamp << std::endl; double iso = -1.0, exposure = -1.0; if (need_exif) { From 31b9deefb7257c3432b056da2ed292ee6b07d4a5 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Thu, 13 Jan 2022 12:52:58 -0800 Subject: [PATCH 26/40] Style fix for python (#25) --- dense_map/geometry_mapper/tools/geometry_mapper.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index dfbb7a44..95314d77 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -172,7 +172,8 @@ def process_args(args): + "This happens before the clouds are fused. If set to a positive value it " + "can fill really big holes but may introduce artifacts. 
It is better to " + "leave the hole-filling for later, once the mesh is fused (see " - + "--max_hole_diameter).") + + "--max_hole_diameter).", + ) parser.add_argument( "--reliability_weight_exponent", dest="reliability_weight_exponent", @@ -216,7 +217,7 @@ def process_args(args): default="0.00005", help="A larger value will result in a smoother mesh.", ) - + parser.add_argument( "--no_boundary_erosion", dest="no_boundary_erosion", @@ -224,7 +225,7 @@ def process_args(args): help="Do not erode the boundary when smoothing the mesh. Erosion may help with " + "making the mesh more regular and easier to hole-fill, but may be undesirable " + "in regions which don't get to be hole-filled.", - ) + ) parser.add_argument( "--max_num_hole_edges", @@ -584,7 +585,7 @@ def smoothe_mesh(input_mesh, output_mesh, args, attempt): smoothe_boundary = "0" else: smoothe_boundary = "1" - + cmd = [ smoothe_mesh_path, num_iterations, From 07ad3c77bc2d3f5ce7cfbd7bcc7fff70b1229899 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Thu, 13 Jan 2022 13:35:43 -0800 Subject: [PATCH 27/40] Fix inspection tool save + cargo debian (#23) * add cargo to debians * fixing save option --- INSTALL.md | 2 +- .../inspection/src/inspection_node.cc | 28 +++++++++++-------- debian/rules | 2 +- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 5a7788cd..cbd6e943 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -102,7 +102,7 @@ A new catkin profile should be made to retain the configurations and easily swit --install-space $ARMHF_CHROOT_DIR/opt/isaac \ --devel-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/devel \ --log-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/logs \ - --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection isaac_hw_msgs wifi isaac gs_action_helper \ + --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ 
--cmake-args -DCMAKE_TOOLCHAIN_FILE=$ISAAC_WS/src/scripts/build/isaac_cross.cmake \ -DARMHF_CHROOT_DIR=$ARMHF_CHROOT_DIR diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 2b23169e..ebfdf3a2 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -640,19 +640,23 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } // Save command case isaac_msgs::InspectionGoal::SAVE: - std::ofstream myfile; - std::string path = ros::package::getPath("inspection") + "/resources/current.txt"; - myfile.open(path); - for (int i = 0; i < goal_.inspect_poses.poses.size(); i++) { - myfile << goal_.inspect_poses.poses[i].position.x << " " - << goal_.inspect_poses.poses[i].position.y << " " - << goal_.inspect_poses.poses[i].position.z << " " - << goal_.inspect_poses.poses[i].orientation.x << " " - << goal_.inspect_poses.poses[i].orientation.y << " " - << goal_.inspect_poses.poses[i].orientation.z << " " - << goal_.inspect_poses.poses[i].orientation.w<< "\n"; + if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size()) { + std::ofstream myfile; + std::string path = ros::package::getPath("inspection") + "/resources/current.txt"; + myfile.open(path); + for (int i = goal_counter_; i < goal_.inspect_poses.poses.size(); i++) { + myfile << goal_.inspect_poses.poses[i].position.x << " " + << goal_.inspect_poses.poses[i].position.y << " " + << goal_.inspect_poses.poses[i].position.z << " " + << goal_.inspect_poses.poses[i].orientation.x << " " + << goal_.inspect_poses.poses[i].orientation.y << " " + << goal_.inspect_poses.poses[i].orientation.z << " " + << goal_.inspect_poses.poses[i].orientation.w << "\n"; + } + myfile.close(); + } else { + NODELET_ERROR_STREAM("Nothing to save"); } - myfile.close(); return; } } diff --git a/debian/rules b/debian/rules index eb689b40..74a13876 100755 --- a/debian/rules +++ 
b/debian/rules @@ -46,7 +46,7 @@ override_dh_auto_configure: --install-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/opt/isaac \ --devel-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/devel \ --log-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/logs \ - --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection isaac_hw_msgs wifi isaac gs_action_helper \ + --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ --cmake-args \ -DCMAKE_TOOLCHAIN_FILE=$(CMAKE_TOOLCHAIN_FILE) \ From eb58909dca3d848ceb1f11fc2b0b3f090a04e1a5 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Fri, 14 Jan 2022 15:35:39 -0800 Subject: [PATCH 28/40] Docker image msg add (#26) * adding isaac tf config * adding soundsee transform from official data * adding custom isaac config path to be general * adding msg images upload to CI * add docker messages to PR actions * adding astrobee repo checkout * adding astrobee repo to pr action * trying to split the docker images * adding remote to dockerfile * split all docker compiles * fixing python version for ubuntu 18 --- .github/workflows/ci_pr.yml | 93 ++++++++++++++++++++---- .github/workflows/ci_push.yml | 105 +++++++++++++++++++++++---- scripts/docker/build.sh | 4 +- scripts/docker/isaac_msgs.Dockerfile | 4 +- 4 files changed, 172 insertions(+), 34 deletions(-) diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index 5463516b..d7b9e81b 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -11,13 +11,21 @@ jobs: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: 
isaac/ - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' @@ -25,24 +33,44 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' -t isaac:latest-ubuntu16.04 + - name: Build messages image isaac/astrobee:msgs-ubuntu16.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/astrobee:msgs-ubuntu16.04 + - name: Build messages image isaac/isaac:msgs-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:msgs-ubuntu16.04 build-bionic: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 @@ -50,24 +78,45 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 -t isaac:latest-ubuntu18.04 + - name: Build messages image isaac/astrobee:msgs-ubuntu18.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t isaac/astrobee:msgs-ubuntu18.04 + + - name: Build messages image isaac/isaac:msgs-ubuntu18.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t isaac/isaac:msgs-ubuntu18.04 build-focal: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 @@ -75,8 +124,22 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 -t isaac:latest-ubuntu20.04 + + - name: Build messages image isaac/astrobee:msgs-ubuntu20.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t isaac/astrobee:msgs-ubuntu20.04 + + - name: Build messages image isaac/isaac:msgs-ubuntu20.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t isaac/isaac:msgs-ubuntu20.04 \ No newline at end of file diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 9ba53e2c..4129a4a7 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -11,13 +11,21 @@ jobs: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' @@ -25,13 +33,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04 + - name: Build messages dockers for Ubuntu 16 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu16.04 + + - name: Build messages dockers for Ubuntu 16 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu16.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -39,19 +62,29 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu16.04; fi; build-bionic: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive 
+ path: isaac/ - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 @@ -59,13 +92,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04 + - name: Build messages dockers for Ubuntu 18 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu18.04 + + - name: Build messages dockers for Ubuntu 18 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu18.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -73,19 +121,29 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu18.04; fi; + if [ "${{ 
github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu18.04; fi; build-focal: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 @@ -93,13 +151,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04 + - name: Build messages dockers for Ubuntu 20 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu20.04 + + - name: Build messages dockers for Ubuntu 20 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu20.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -107,3 
+180,5 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu20.04; fi; diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index dd6e8541..8a0d5001 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -151,13 +151,13 @@ docker build ${astrobee_source:-${rootdir}} \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ --build-arg ROS_VERSION=${ROS_VERSION} \ --build-arg PYTHON=${PYTHON} \ - -t astrobee:msgs-ubuntu${UBUNTU_VERSION} + -t isaac/astrobee:msgs-ubuntu${UBUNTU_VERSION} docker build ${isaac_source:-${rootdir}} \ -f ${isaac_source:-${rootdir}}/scripts/docker/isaac_msgs.Dockerfile \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ --build-arg ROS_VERSION=${ROS_VERSION} \ --build-arg PYTHON=${PYTHON} \ - -t isaac:msgs-ubuntu${UBUNTU_VERSION} + -t isaac/isaac:msgs-ubuntu${UBUNTU_VERSION} # Build IDI and MAST export IDI_PATH=${idi_source} diff --git a/scripts/docker/isaac_msgs.Dockerfile b/scripts/docker/isaac_msgs.Dockerfile index e180891b..a56b91d2 100644 --- a/scripts/docker/isaac_msgs.Dockerfile +++ b/scripts/docker/isaac_msgs.Dockerfile @@ -20,8 +20,8 @@ # You must set the docker context to be the repository root directory ARG UBUNTU_VERSION=16.04 - -FROM astrobee:msgs-ubuntu${UBUNTU_VERSION} +ARG REMOTE=isaac +FROM ${REMOTE}/astrobee:msgs-ubuntu${UBUNTU_VERSION} ARG ROS_VERSION=kinetic ARG PYTHON="" From e576030f54a1ca42029629bf8c03d5007d8c5abc Mon Sep 17 00:00:00 2001 From: Marina Moreira 
<67443181+marinagmoreira@users.noreply.github.com> Date: Fri, 14 Jan 2022 16:36:45 -0800 Subject: [PATCH 29/40] Docker image astrobee msgs upload on isaac (#27) * adding isaac tf config * adding soundsee transform from official data * adding custom isaac config path to be general * adding msg images upload to CI * add docker messages to PR actions * adding astrobee repo checkout * adding astrobee repo to pr action * trying to split the docker images * adding remote to dockerfile * split all docker compiles * fixing python version for ubuntu 18 * making astrobee msgs part of isaac because of permissions --- .github/workflows/ci_pr.yml | 6 +++--- .github/workflows/ci_push.yml | 12 ++++++------ scripts/docker/build.sh | 2 +- scripts/docker/isaac_msgs.Dockerfile | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index d7b9e81b..ddf03286 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -44,7 +44,7 @@ jobs: --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' - -t isaac/astrobee:msgs-ubuntu16.04 + -t isaac/isaac:astrobee-msgs-ubuntu16.04 - name: Build messages image isaac/isaac:msgs-ubuntu16.04 run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile --build-arg UBUNTU_VERSION=16.04 @@ -89,7 +89,7 @@ jobs: --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON='' - -t isaac/astrobee:msgs-ubuntu18.04 + -t isaac/isaac:astrobee-msgs-ubuntu18.04 - name: Build messages image isaac/isaac:msgs-ubuntu18.04 run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile @@ -135,7 +135,7 @@ jobs: --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 - -t isaac/astrobee:msgs-ubuntu20.04 + -t isaac/isaac:astrobee-msgs-ubuntu20.04 - name: Build messages image isaac/isaac:msgs-ubuntu20.04 run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile diff --git 
a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 4129a4a7..60b58a05 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -45,7 +45,7 @@ jobs: --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' - -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu16.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu16.04 - name: Build messages dockers for Ubuntu 16 (isaac) run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile @@ -62,7 +62,7 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04; fi; - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu16.04; fi; build-bionic: @@ -104,7 +104,7 @@ jobs: --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON='' - -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu18.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu18.04 - name: Build messages dockers for Ubuntu 18 (isaac) run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile @@ -121,7 +121,7 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04; fi; - if [ "${{ 
github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu18.04; fi; build-focal: @@ -163,7 +163,7 @@ jobs: --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 - -t ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu20.04 + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu20.04 - name: Build messages dockers for Ubuntu 20 (isaac) run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile @@ -180,5 +180,5 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04; fi; - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/astrobee:msgs-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu20.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu20.04; fi; diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index 8a0d5001..aadffe94 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -151,7 +151,7 @@ docker build ${astrobee_source:-${rootdir}} \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ --build-arg ROS_VERSION=${ROS_VERSION} \ --build-arg PYTHON=${PYTHON} \ - -t isaac/astrobee:msgs-ubuntu${UBUNTU_VERSION} + -t isaac/isaac:astrobee-msgs-ubuntu${UBUNTU_VERSION} docker build 
${isaac_source:-${rootdir}} \ -f ${isaac_source:-${rootdir}}/scripts/docker/isaac_msgs.Dockerfile \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ diff --git a/scripts/docker/isaac_msgs.Dockerfile b/scripts/docker/isaac_msgs.Dockerfile index a56b91d2..e06c7232 100644 --- a/scripts/docker/isaac_msgs.Dockerfile +++ b/scripts/docker/isaac_msgs.Dockerfile @@ -21,7 +21,7 @@ ARG UBUNTU_VERSION=16.04 ARG REMOTE=isaac -FROM ${REMOTE}/astrobee:msgs-ubuntu${UBUNTU_VERSION} +FROM ${REMOTE}/isaac:astrobee-msgs-ubuntu${UBUNTU_VERSION} ARG ROS_VERSION=kinetic ARG PYTHON="" From ec921e9e77fd6f0e9b8bc91382ade54b751dfea7 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 19 Jan 2022 17:32:02 -0800 Subject: [PATCH 30/40] changing parameters to transform; making station a parameter (#29) --- .../include/inspection/inspection.h | 2 +- .../behaviors/inspection/src/inspection.cc | 21 +++++--- .../inspection/src/inspection_node.cc | 2 +- isaac/config/behaviors/inspection.config | 54 ++++--------------- isaac/config/transforms.config | 5 ++ isaac/launch/isaac_astrobee.launch | 15 ++---- isaac/launch/robot/ILP.launch | 4 +- 7 files changed, 39 insertions(+), 64 deletions(-) diff --git a/astrobee/behaviors/inspection/include/inspection/inspection.h b/astrobee/behaviors/inspection/include/inspection/inspection.h index f833bdec..a19a33fe 100755 --- a/astrobee/behaviors/inspection/include/inspection/inspection.h +++ b/astrobee/behaviors/inspection/include/inspection/inspection.h @@ -133,7 +133,7 @@ class Inspection { tf2_ros::Buffer tf_buffer_; std::shared_ptr tf_listener_; ros::Publisher pub_; - tf2::Quaternion vent_to_scicam_rot_; + tf2::Quaternion target_to_scicam_rot_; geometry_msgs::PoseArray points_; // Vector containing inspection poses diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc index 93678730..88c3be27 100644 --- a/astrobee/behaviors/inspection/src/inspection.cc 
+++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -72,7 +72,6 @@ namespace inspection { void Inspection::ReadParam() { // Parameters Anomaly survey - opt_distance_ = cfg_->Get("optimal_distance"); dist_resolution_ = cfg_->Get("distance_resolution"); angle_resolution_ = cfg_->Get("angle_resolution"); max_angle_ = cfg_->Get("max_angle"); @@ -80,10 +79,20 @@ namespace inspection { min_distance_ = cfg_->Get("min_distance"); target_size_x_ = cfg_->Get("target_size_x"); target_size_y_ = cfg_->Get("target_size_y"); - vent_to_scicam_rot_ = tf2::Quaternion(cfg_->Get("vent_to_sci_cam_rotation_x"), - cfg_->Get("vent_to_sci_cam_rotation_y"), - cfg_->Get("vent_to_sci_cam_rotation_z"), - cfg_->Get("vent_to_sci_cam_rotation_w")); + + // Get transform from target to sci cam + try { + geometry_msgs::TransformStamped tf_target_to_sci_cam = tf_buffer_.lookupTransform("sci_cam", "target", + ros::Time(0)); + opt_distance_ = tf_target_to_sci_cam.transform.translation.z; + target_to_scicam_rot_ = tf2::Quaternion( + tf_target_to_sci_cam.transform.rotation.x, + tf_target_to_sci_cam.transform.rotation.y, + tf_target_to_sci_cam.transform.rotation.z, + tf_target_to_sci_cam.transform.rotation.w); + } catch (tf2::TransformException &ex) { + ROS_ERROR("ERROR getting target to sci_cam transform: %s", ex.what()); + } // Parameters Panorama survey pan_min_ = cfg_->Get("pan_min") * PI / 180.0; @@ -557,7 +566,7 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); panorama_rotation.setRPY(0, tilt, pan); - panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; point.orientation.x = panorama_rotation.x(); point.orientation.y = panorama_rotation.y(); point.orientation.z = panorama_rotation.z(); diff --git 
a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index ebfdf3a2..12629e5d 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -515,7 +515,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } // Allow image to stabilize - ros::Duration(2.0).sleep(); + ros::Duration(cfg_.Get("station_time")).sleep(); // Signal an imminent sci cam image sci_cam_req_ = true; diff --git a/isaac/config/behaviors/inspection.config b/isaac/config/behaviors/inspection.config index 436035e6..47fc3874 100644 --- a/isaac/config/behaviors/inspection.config +++ b/isaac/config/behaviors/inspection.config @@ -128,51 +128,6 @@ parameters = { }, -- INSPECTION CONFIG PARAMETERS { - id = "vent_to_sci_cam_rotation_x", - reconfigurable = true, - type = "double", - default = 0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam x" - },{ - id = "vent_to_sci_cam_rotation_y", - reconfigurable = true, - type = "double", - default = -0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam y" - },{ - id = "vent_to_sci_cam_rotation_z", - reconfigurable = true, - type = "double", - default = -0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam z" - },{ - id = "vent_to_sci_cam_rotation_w", - reconfigurable = true, - type = "double", - default = 0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam w" - },{ - id = "optimal_distance", - reconfigurable = true, - type = "double", - default = 1.0, - min = 0.2, - max = 3.0, - unit = "m", - description = "Optimal distance between target and camera" - },{ id = "distance_resolution", reconfigurable = true, type = "double", @@ -289,6 +244,15 @@ parameters = { max = 1.0, unit = "percentage", description = "Overlap between consecutive images" + 
},{ + id = "station_time", + reconfigurable = true, + type = "double", + default = 2.0, + min = 0.0, + max = 10.0, + unit = "seconds", + description = "Wait time in each station" }} \ No newline at end of file diff --git a/isaac/config/transforms.config b/isaac/config/transforms.config index ad7e015d..50abe5c4 100644 --- a/isaac/config/transforms.config +++ b/isaac/config/transforms.config @@ -27,6 +27,11 @@ transforms = { { global = false, parent = "body", child = "soundsee", transform = transform(vec3(0.131585, 0.00185, 0.1068), quat4(0.500, -0.500, 0.500, -0.500) ) }, + -- TARGET TO SCICAM + + { global = false, parent = "sci_cam", child = "target", + transform = transform(vec3(0.0, 0.0, 1.0), quat4(0.500, -0.500, -0.500, 0.500) ) }, + ------------- -- GLOBALS -- ------------- diff --git a/isaac/launch/isaac_astrobee.launch b/isaac/launch/isaac_astrobee.launch index 2b5a3cd6..5f41d84b 100644 --- a/isaac/launch/isaac_astrobee.launch +++ b/isaac/launch/isaac_astrobee.launch @@ -69,12 +69,13 @@ name="ASTROBEE_RESOURCE_DIR" value="$(find astrobee)/resources" /> - - + @@ -178,14 +179,8 @@ - - - - + + diff --git a/isaac/launch/robot/ILP.launch b/isaac/launch/robot/ILP.launch index 182b6920..544fcc24 100644 --- a/isaac/launch/robot/ILP.launch +++ b/isaac/launch/robot/ILP.launch @@ -38,6 +38,8 @@ name="ilp_cargo" output="$(arg output)"/> + @@ -68,7 +70,7 @@ - + From e53c8319c99290466af5eaf92e45927f0b6a6019 Mon Sep 17 00:00:00 2001 From: Oleg Alexandrov Date: Mon, 24 Jan 2022 15:39:00 -0800 Subject: [PATCH 31/40] Modularize camera_refiner (#30) --- dense_map/geometry_mapper/CMakeLists.txt | 4 + .../geometry_mapper/include/camera_image.h | 62 + .../geometry_mapper/include/dense_map_utils.h | 10 - .../geometry_mapper/include/interest_point.h | 28 + .../include/texture_processing.h | 145 +- dense_map/geometry_mapper/readme.md | 24 +- .../geometry_mapper/src/dense_map_utils.cc | 66 +- .../geometry_mapper/src/interest_point.cc | 396 +++ 
.../geometry_mapper/src/texture_processing.cc | 309 +- .../geometry_mapper/tools/camera_refiner.cc | 2785 +++++++---------- .../geometry_mapper/tools/orthoproject.cc | 2 +- .../geometry_mapper/tools/streaming_mapper.cc | 14 +- .../geometry_mapper/tools/test_texture_gen.cc | 134 + 13 files changed, 2136 insertions(+), 1843 deletions(-) create mode 100644 dense_map/geometry_mapper/include/camera_image.h create mode 100644 dense_map/geometry_mapper/tools/test_texture_gen.cc diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 2af71ed8..18c12e73 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -208,6 +208,10 @@ add_executable(orthoproject tools/orthoproject.cc) target_link_libraries(orthoproject geometry_mapper_lib gflags glog) +add_executable(test_texture_gen tools/test_texture_gen.cc) +target_link_libraries(test_texture_gen + geometry_mapper_lib gflags glog) + add_executable(old_orthoproject tools/old_orthoproject.cc) target_link_libraries(old_orthoproject geometry_mapper_lib gflags glog) diff --git a/dense_map/geometry_mapper/include/camera_image.h b/dense_map/geometry_mapper/include/camera_image.h new file mode 100644 index 00000000..b3568460 --- /dev/null +++ b/dense_map/geometry_mapper/include/camera_image.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#ifndef CAMERA_IMAGE_H_ +#define CAMERA_IMAGE_H_ + +#include + +namespace dense_map { + +// A class to encompass all known information about a camera +struct cameraImage { + // An index to look up the type of camera. This will equal the + // value ref_camera_type if and only if this is a reference + // camera. + int camera_type; + + // The timestamp for this camera (in floating point seconds since epoch) + // measured by the clock/cpu which is particular to this camera. + double timestamp; + + // The timestamp with an adjustment added to it to be in + // reference camera time + double ref_timestamp; + + // The timestamp of the closest cloud for this image, measured + // with the same clock as the 'timestamp' value. + double cloud_timestamp; + + // Indices to look up the reference cameras bracketing this camera + // in time. The two indices will have same value if and only if + // this is a reference camera. + int beg_ref_index; + int end_ref_index; + + // The image for this camera, in grayscale + cv::Mat image; + + // The corresponding depth cloud, for an image + depth camera. + // Will be empty and uninitialized for a camera lacking depth. 
+ cv::Mat depth_cloud; +}; + +} // namespace dense_map + +#endif // CAMERA_IMAGE_H_ diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index fcdc3360..4deefd77 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -216,16 +216,6 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b // Must always have NUM_EXIF the last. enum ExifData { TIMESTAMP = 0, EXPOSURE_TIME, ISO, APERTURE, FOCAL_LENGTH, NUM_EXIF }; -// Triangulate two rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, - Eigen::Vector2d const& pix2); - -// Triangulate n rays emanating from given undistorted and centered pixels -Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, - std::vector const& world_to_cam_vec, - std::vector const& pix_vec); - // A utility for saving a camera in a format ASP understands. // TODO(oalexan1): Expose the sci cam intrinsics rather than having // them hard-coded. 
diff --git a/dense_map/geometry_mapper/include/interest_point.h b/dense_map/geometry_mapper/include/interest_point.h index d505d573..45ea3b4f 100644 --- a/dense_map/geometry_mapper/include/interest_point.h +++ b/dense_map/geometry_mapper/include/interest_point.h @@ -21,16 +21,19 @@ #define INTEREST_POINT_H_ #include +#include #include #include + #include #include #include #include +#include #include #include @@ -146,6 +149,7 @@ struct InterestPoint { }; // End class InterestPoint typedef std::pair, std::vector > MATCH_PAIR; +typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; void detectFeatures(const cv::Mat& image, bool verbose, // Outputs @@ -166,6 +170,30 @@ void saveImagesAndMatches(std::string const& left_prefix, std::string const& rig std::pair const& index_pair, MATCH_PAIR const& match_pair, std::vector const& images); +// Triangulate two rays emanating from given undistorted and centered pixels +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, + Eigen::Vector2d const& pix2); + +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec); + +struct cameraImage; + +void detectMatchFeatures( // Inputs + std::vector const& cams, + std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& world_to_cam, int num_overlaps, + int initial_max_reprojection_error, int num_match_threads, + bool verbose, + // Outputs + std::vector>>& keypoint_vec, + std::vector>& pid_to_cid_fid, + std::vector & image_files); + } // namespace dense_map #endif // INTEREST_POINT_H_ diff --git a/dense_map/geometry_mapper/include/texture_processing.h b/dense_map/geometry_mapper/include/texture_processing.h index d64bcbdf..4bf09af0 100644 --- 
a/dense_map/geometry_mapper/include/texture_processing.h +++ b/dense_map/geometry_mapper/include/texture_processing.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -77,7 +78,7 @@ struct FaceInfo { int64_t width, height; // The padding to apply to each face bounding box before sampling it - int64_t padding; + int64_t face_info_padding; // The pixel at (x, min_y, min_z) in the plane will end up at location (shift_u, shift_v) // in the texture. @@ -100,7 +101,7 @@ struct FaceInfo { min_z = 0.0; width = 0L; height = 0L; - padding = 0L; + face_info_padding = 1L; valid = true; shift_u = std::numeric_limits::max(); shift_v = 0L; @@ -119,7 +120,7 @@ class IsaacTexturePatch { typedef std::shared_ptr Ptr; typedef std::shared_ptr ConstPtr; typedef std::vector Faces; - typedef std::vector Texcoords; + typedef std::vector Texcoords; private: int64_t label; @@ -129,21 +130,23 @@ class IsaacTexturePatch { public: /** Constructs a texture patch. */ - IsaacTexturePatch(int64_t label, std::vector const& faces, std::vector const& texcoords, + IsaacTexturePatch(int64_t label, std::vector const& faces, + std::vector const& texcoords, int64_t width, int64_t height); IsaacTexturePatch(IsaacTexturePatch const& texture_patch); static IsaacTexturePatch::Ptr create(IsaacTexturePatch::ConstPtr texture_patch); static IsaacTexturePatch::Ptr create(int64_t label, std::vector const& faces, - std::vector const& texcoords, int64_t width, int64_t height); + std::vector const& texcoords, + int64_t width, int64_t height); IsaacTexturePatch::Ptr duplicate(void); std::vector& get_faces(void); std::vector const& get_faces(void) const; - std::vector & get_texcoords(void); - std::vector const& get_texcoords(void) const; + std::vector & get_texcoords(void); + std::vector const& get_texcoords(void) const; int64_t get_label(void) const; int64_t get_width(void) const; @@ -152,13 +155,14 @@ class IsaacTexturePatch { }; inline IsaacTexturePatch::IsaacTexturePatch(int64_t label, 
std::vector const& faces, - std::vector const& texcoords, int64_t width, int64_t height) + std::vector const& texcoords, + int64_t width, int64_t height) : label(label), faces(faces), texcoords(texcoords), width(width), height(height) {} IsaacTexturePatch::IsaacTexturePatch(IsaacTexturePatch const& texture_patch) { label = texture_patch.label; faces = std::vector(texture_patch.faces); - texcoords = std::vector(texture_patch.texcoords); + texcoords = std::vector(texture_patch.texcoords); width = texture_patch.width; height = texture_patch.height; } @@ -167,9 +171,9 @@ inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(IsaacTexturePatch::Const return std::make_shared(*texture_patch); } -inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(int64_t label, std::vector const& faces, - std::vector const& texcoords, int64_t width, - int64_t height) { +inline IsaacTexturePatch::Ptr IsaacTexturePatch::create( + int64_t label, std::vector const& faces, + std::vector const& texcoords, int64_t width, int64_t height) { return std::make_shared(label, faces, texcoords, width, height); } @@ -183,13 +187,13 @@ inline int64_t IsaacTexturePatch::get_width(void) const { return width; } inline int64_t IsaacTexturePatch::get_height(void) const { return height; } -inline std::vector& IsaacTexturePatch::get_texcoords(void) { +inline std::vector& IsaacTexturePatch::get_texcoords(void) { return texcoords; } inline std::vector& IsaacTexturePatch::get_faces(void) { return faces; } -inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { +inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { return texcoords; } @@ -198,7 +202,8 @@ inline std::vector const& IsaacTexturePatch::get_faces(void) const inline int64_t IsaacTexturePatch::get_size(void) const { return get_width() * get_height(); } /** - * Class representing a texture atlas. + * Class representing a texture atlas. 
Have to duplicate the code from texrecon + * as there are custom changes for ISAAC. */ class IsaacTextureAtlas { public: @@ -206,14 +211,13 @@ class IsaacTextureAtlas { typedef std::vector Faces; typedef std::vector TexcoordIds; - typedef std::vector Texcoords; + typedef std::vector Texcoords; int64_t get_width(); int64_t get_height(); private: int64_t width, height, determined_height; - int64_t const padding; bool finalized; Faces faces; @@ -263,6 +267,80 @@ inline int64_t IsaacTextureAtlas::get_width() { return width; } inline int64_t IsaacTextureAtlas::get_height() { return height; } + +/** + * Class representing a obj model. Have to duplicate the texrecon structure, as + * we use double-precision texcoords. + */ +class IsaacObjModel { + public: + struct Face { + std::size_t vertex_ids[3]; + std::size_t texcoord_ids[3]; + std::size_t normal_ids[3]; + }; + + struct Group { + std::string material_name; + std::vector faces; + }; + + typedef std::vector Vertices; + typedef std::vector TexCoords; + typedef std::vector Normals; + typedef std::vector Groups; + + private: + Vertices vertices; + TexCoords texcoords; + Normals normals; + Groups groups; + MaterialLib material_lib; + + public: + /** Saves the obj model to an .obj file, its material lib and the + materials with the given prefix. 
*/ + void save_to_files(std::string const & prefix) const; + + MaterialLib & get_material_lib(void); + Vertices & get_vertices(void); + TexCoords & get_texcoords(void); + Normals & get_normals(void); + Groups & get_groups(void); + + static void save(IsaacObjModel const & model, std::string const & prefix); +}; + +inline +MaterialLib & +IsaacObjModel::get_material_lib(void) { + return material_lib; +} + +inline +IsaacObjModel::Vertices & +IsaacObjModel::get_vertices(void) { + return vertices; +} + +inline +IsaacObjModel::TexCoords & +IsaacObjModel::get_texcoords(void) { + return texcoords; +} + +inline +IsaacObjModel::Normals & +IsaacObjModel::get_normals(void) { + return normals; +} + +inline +IsaacObjModel::Groups & +IsaacObjModel::get_groups(void) { + return groups; +} + // Load and prepare a mesh void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mesh, std::shared_ptr& mesh_info, @@ -271,11 +349,12 @@ void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mes void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads, // outputs - std::vector& face_projection_info, std::vector& texture_atlases, - tex::Model& model); + std::vector& face_projection_info, + std::vector& texture_atlases, + IsaacObjModel& model); // Put an textured mesh obj file in a string -void formObj(tex::Model& texture_model, std::string const& out_prefix, std::string& obj_str); +void formObj(IsaacObjModel& texture_model, std::string const& out_prefix, std::string& obj_str); // Put an textured mesh obj file in a string void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& face_vec, @@ -299,17 +378,31 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Project texture on a texture model that was pre-filled already, so // only the texture pixel values need to be computed -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - 
camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, double pixel_size, int64_t num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture); - -void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, cv::Mat const& image, + std::vector& texture_atlases, + IsaacObjModel& model, cv::Mat& out_texture); + +// Find where ray emanating from a distorted pixel intersects a mesh. Return true +// on success. +bool ray_mesh_intersect(Eigen::Vector2d const& dist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection); + +void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + cv::Mat const& image, Eigen::Affine3d const& world_to_cam, camera::CameraParameters const& cam_params, int64_t num_exclude_boundary_pixels, std::string const& out_prefix); // Save a model -void isaac_save_model(ObjModel* obj_model, std::string const& prefix); +void isaac_save_model(IsaacObjModel* obj_model, std::string const& prefix); } // namespace dense_map diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 8f6c0360..2f37cb46 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -1575,6 +1575,12 @@ If the ``--out_texture_dir`` option is specified, the tool will create textured meshes for each image and optimized camera at the end. Ideally those textured meshes will agree among each other. +### The algorithm + +See camera_refiner.cc for a lengthy explanation of the algorithm. 
+ +### Camera refiner options + This program's options are: --ros_bag (string, default = "") @@ -1677,6 +1683,10 @@ This program's options are: Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config file with this value. + --sci_cam_timestamps (string, default = "") + Use only these sci cam timestamps. Must be a file with one + timestamp per line. + --depth_tri_weight (double, default = 1000.0) The weight to give to the constraint that depth measurements agree with triangulated points. Use a bigger number as depth @@ -1729,9 +1739,6 @@ This program's options are: other relative to the triangulated points, so care is needed here. - --refiner_skip_filtering (bool, false unless specified) - Do not do any outlier filtering. - --out_texture_dir (string, default = "") If non-empty and if an input mesh was provided, project the camera images using the optimized poses onto the mesh and write @@ -1777,9 +1784,9 @@ This program's options are: --num_opt_threads (int32, default = 16) How many threads to use in the optimization. - --sci_cam_timestamps (string, default = "") - Use only these sci cam timestamps. Must be a file with one - timestamp per line. + --num_match_threads (int32, default = 8) + How many threads to use in feature detection/matching. + A large number can use a lot of memory. --verbose (bool, false unless specified) Print the residuals and save the images and match files. Stereo @@ -1809,6 +1816,11 @@ at the end, replacing the fisheye model, and from then on the new model can be used for further calibration experiments just as with the fisheye model. +It may however be needed to rerun the refiner one more time, this time +with the new distortion model read from disk, and still keep all +intrinsics and extrinsics (including the sparse map and depth to +image) fixed, except for the nav cam distortion, to fully tighten it. 
+ Since it is expected that fitting such a model is harder at the periphery, where the distortion is stronger, the camera refiner has the option ``--nav_cam_num_exclude_boundary_pixels`` can be used to diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index 5b89e2fe..043c9721 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -24,17 +24,6 @@ #include -// Get rid of warning beyond our control -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic push -#include -#include -#include -#include -#include -#pragma GCC diagnostic pop - #include #include @@ -44,7 +33,8 @@ namespace dense_map { // A function to split a string like 'optical_center focal_length' into // its two constituents. -void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set& intrinsics_to_float_set) { +void parse_intrinsics_to_float(std::string const& intrinsics_to_float, + std::set& intrinsics_to_float_set) { intrinsics_to_float_set.clear(); std::string val; std::istringstream is(intrinsics_to_float); @@ -954,58 +944,6 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b return; } -// Triangulate rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, - Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, - Eigen::Vector2d const& pix1, - Eigen::Vector2d const& pix2) { - Eigen::Matrix3d k1; - k1 << focal_length1, 0, 0, 0, focal_length1, 0, 0, 0, 1; - - Eigen::Matrix3d k2; - k2 << focal_length2, 0, 0, 0, focal_length2, 0, 0, 0, 1; - - openMVG::Mat34 cid_to_p1, cid_to_p2; - openMVG::P_From_KRt(k1, world_to_cam1.linear(), world_to_cam1.translation(), &cid_to_p1); - openMVG::P_From_KRt(k2, world_to_cam2.linear(), world_to_cam2.translation(), 
&cid_to_p2); - - openMVG::Triangulation tri; - tri.add(cid_to_p1, pix1); - tri.add(cid_to_p2, pix2); - - Eigen::Vector3d solution = tri.compute(); - return solution; -} - -// Triangulate n rays emanating from given undistorted and centered pixels -Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, - std::vector const& world_to_cam_vec, - std::vector const& pix_vec) { - if (focal_length_vec.size() != world_to_cam_vec.size() || - focal_length_vec.size() != pix_vec.size()) - LOG(FATAL) << "All inputs to Triangulate() must have the same size."; - - if (focal_length_vec.size() <= 1) - LOG(FATAL) << "At least two rays must be passed to Triangulate()."; - - openMVG::Triangulation tri; - - for (size_t it = 0; it < focal_length_vec.size(); it++) { - Eigen::Matrix3d k; - k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; - - openMVG::Mat34 cid_to_p; - openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), - &cid_to_p); - - tri.add(cid_to_p, pix_vec[it]); - } - - Eigen::Vector3d solution = tri.compute(); - return solution; -} - // A debug utility for saving a camera in a format ASP understands. // Need to expose the sci cam intrinsics. 
void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, diff --git a/dense_map/geometry_mapper/src/interest_point.cc b/dense_map/geometry_mapper/src/interest_point.cc index 7cef6c1c..17ed579e 100644 --- a/dense_map/geometry_mapper/src/interest_point.cc +++ b/dense_map/geometry_mapper/src/interest_point.cc @@ -22,8 +22,20 @@ #include #include // from the isaac repo +#include // from the isaac repo #include // from the astrobee repo +// Get rid of warnings beyond our control +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic push +#include +#include +#include +#include +#include +#pragma GCC diagnostic pop + #include #include @@ -155,6 +167,131 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag match_mutex->unlock(); } + +// Match features while assuming that the input cameras can be used to filter out +// outliers by reprojection error. 
+void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, + camera::CameraParameters const& left_params, + camera::CameraParameters const& right_params, + Eigen::Affine3d const& left_world_to_cam, + Eigen::Affine3d const& right_world_to_cam, + double reprojection_error, + cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, + bool verbose, + // output + MATCH_PAIR* matches) { + // Match by using descriptors first + std::vector cv_matches; + interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); + + // Do filtering + std::vector left_vec; + std::vector right_vec; + std::vector filtered_cv_matches; + for (size_t j = 0; j < cv_matches.size(); j++) { + int left_ip_index = cv_matches.at(j).queryIdx; + int right_ip_index = cv_matches.at(j).trainIdx; + + Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1]); + + Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1]); + + Eigen::Vector2d undist_left_ip; + Eigen::Vector2d undist_right_ip; + left_params.Convert + (dist_left_ip, &undist_left_ip); + right_params.Convert + (dist_right_ip, &undist_right_ip); + + Eigen::Vector3d X = + dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), + left_world_to_cam, right_world_to_cam, + undist_left_ip, undist_right_ip); + + // Project back into the cameras + Eigen::Vector3d left_cam_X = left_world_to_cam * X; + Eigen::Vector2d undist_left_pix + = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); + Eigen::Vector2d dist_left_pix; + left_params.Convert(undist_left_pix, + &dist_left_pix); + + Eigen::Vector3d right_cam_X = right_world_to_cam * X; + Eigen::Vector2d undist_right_pix + = right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); + Eigen::Vector2d 
dist_right_pix; + right_params.Convert(undist_right_pix, + &dist_right_pix); + + // Filter out points whose reprojection error is too big + bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && + (dist_right_ip - dist_right_pix).norm() <= reprojection_error); + + // If any values above are Inf or NaN, is_good will be false as well + if (!is_good) continue; + + // Get the keypoints from the good matches + left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1])); + right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1])); + + filtered_cv_matches.push_back(cv_matches[j]); + } + + if (left_vec.empty()) return; + + // Filter using geometry constraints + // These may need some tweaking but works reasonably well. + double ransacReprojThreshold = 20.0; + cv::Mat inlier_mask; + int maxIters = 10000; + double confidence = 0.8; + + // affine2D works better than homography + // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); + + std::vector left_ip, right_ip; + for (size_t j = 0; j < filtered_cv_matches.size(); j++) { + int left_ip_index = filtered_cv_matches.at(j).queryIdx; + int right_ip_index = filtered_cv_matches.at(j).trainIdx; + + if (inlier_mask.at(j, 0) == 0) continue; + + cv::Mat left_desc = left_descriptors.row(left_ip_index); + cv::Mat right_desc = right_descriptors.row(right_ip_index); + + InterestPoint left; + left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); + + InterestPoint right; + right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); + + left_ip.push_back(left); + right_ip.push_back(right); + } + + // Update the shared variable using a lock + match_mutex->lock(); + + // Print the 
verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; + + *matches = std::make_pair(left_ip, right_ip); + match_mutex->unlock(); +} + void writeIpRecord(std::ofstream& f, InterestPoint const& p) { f.write(reinterpret_cast(&(p.x)), sizeof(p.x)); f.write(reinterpret_cast(&(p.y)), sizeof(p.y)); @@ -212,4 +349,263 @@ void saveImagesAndMatches(std::string const& left_prefix, std::string const& rig writeMatchFile(match_file, match_pair.first, match_pair.second); } +// Triangulate rays emanating from given undistorted and centered pixels +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, + Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, + Eigen::Vector2d const& pix1, + Eigen::Vector2d const& pix2) { + Eigen::Matrix3d k1; + k1 << focal_length1, 0, 0, 0, focal_length1, 0, 0, 0, 1; + + Eigen::Matrix3d k2; + k2 << focal_length2, 0, 0, 0, focal_length2, 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p1, cid_to_p2; + openMVG::P_From_KRt(k1, world_to_cam1.linear(), world_to_cam1.translation(), &cid_to_p1); + openMVG::P_From_KRt(k2, world_to_cam2.linear(), world_to_cam2.translation(), &cid_to_p2); + + openMVG::Triangulation tri; + tri.add(cid_to_p1, pix1); + tri.add(cid_to_p2, pix2); + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec) { + if (focal_length_vec.size() != world_to_cam_vec.size() || + focal_length_vec.size() != pix_vec.size()) + LOG(FATAL) << "All inputs to Triangulate() must have the same size."; + + if (focal_length_vec.size() <= 1) + LOG(FATAL) << "At least two rays must be passed to Triangulate()."; + + 
openMVG::Triangulation tri; + + for (size_t it = 0; it < focal_length_vec.size(); it++) { + Eigen::Matrix3d k; + k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p; + openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), + &cid_to_p); + + tri.add(cid_to_p, pix_vec[it]); + } + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + +void detectMatchFeatures( // Inputs + std::vector const& cams, + std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& world_to_cam, int num_overlaps, + int initial_max_reprojection_error, int num_match_threads, + bool verbose, + // Outputs + std::vector>>& keypoint_vec, + std::vector>& pid_to_cid_fid, + std::vector & image_files) { + // Wipe the outputs + keypoint_vec.clear(); + pid_to_cid_fid.clear(); + image_files.clear(); + + if (verbose) { + int count = 10000; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cam_names[cams[it].camera_type] << ".jpg"; + std::string name = oss.str(); + std::cout << "Writing: " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } + } + + // Detect features using multiple threads. Too many threads may result + // in high memory usage. + std::ostringstream oss; + oss << num_match_threads; + std::string num_threads = oss.str(); + google::SetCommandLineOption("num_threads", num_threads.c_str()); + if (!gflags::GetCommandLineOption("num_threads", &num_threads)) + LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; + std::cout << "Using " << num_threads << " threads for feature detection/matching." << std::endl; + + std::cout << "Detecting features." 
<< std::endl; + + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(cams.size()); + cid_to_keypoint_map.resize(cams.size()); + { + // Make the thread pool go out of scope when not needed to not use up memory + ff_common::ThreadPool thread_pool; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool.AddTask + (&dense_map::detectFeatures, // multi-thread // NOLINT + // dense_map::detectFeatures( // single-thread // NOLINT + cams[it].image, verbose, &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + } + thread_pool.Join(); + } + + MATCH_MAP matches; + + std::vector > image_pairs; + for (size_t it1 = 0; it1 < cams.size(); it1++) { + for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + num_overlaps + 1); it2++) { + image_pairs.push_back(std::make_pair(it1, it2)); + } + } + + { + std::cout << "Matching features." << std::endl; + ff_common::ThreadPool thread_pool; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + thread_pool.AddTask + (&dense_map::matchFeaturesWithCams, // multi-threaded // NOLINT + // dense_map::matchFeaturesWithCams( // single-threaded // NOLINT + &match_mutex, left_image_it, right_image_it, cam_params[cams[left_image_it].camera_type], + cam_params[cams[right_image_it].camera_type], world_to_cam[left_image_it], + world_to_cam[right_image_it], initial_max_reprojection_error, + cid_to_descriptor_map[left_image_it], cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it], verbose, + &matches[pair]); + } + thread_pool.Join(); + } + cid_to_descriptor_map = std::vector(); // Wipe, takes memory + + // Give all interest points in a given image a unique id, and put + // them in a vector with the id corresponding to the interest point + std::vector, int>> keypoint_map(cams.size()); + for 
(auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + // Initialize to zero for the moment + keypoint_map[left_index][dist_left_ip] = 0; + keypoint_map[right_index][dist_right_ip] = 0; + } + } + keypoint_vec.resize(cams.size()); + for (size_t cid = 0; cid < cams.size(); cid++) { + keypoint_vec[cid].resize(keypoint_map[cid].size()); + int fid = 0; + for (auto ip_it = keypoint_map[cid].begin(); ip_it != keypoint_map[cid].end(); + ip_it++) { + auto& dist_ip = ip_it->first; // alias + keypoint_map[cid][dist_ip] = fid; + keypoint_vec[cid][fid] = dist_ip; + fid++; + } + } + + // If feature A in image I matches feather B in image J, which + // matches feature C in image K, then (A, B, C) belong together in + // a track, and will have a single triangulated xyz. Build such a track. 
+ + openMVG::matching::PairWiseMatches match_map; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + + std::vector mvg_matches; + + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + + int left_id = keypoint_map[left_index][dist_left_ip]; + int right_id = keypoint_map[right_index][dist_right_ip]; + mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); + } + match_map[index_pair] = mvg_matches; + } + + if (verbose) { + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; + + int left_index = index_pair.first; + int right_index = index_pair.second; + + std::string left_image = image_files[left_index]; + std::string right_image = image_files[right_index]; + + std::string left_stem = boost::filesystem::path(left_image).stem().string(); + std::string right_stem = boost::filesystem::path(right_image).stem().string(); + + std::string match_file = left_stem + "__" + right_stem + ".match"; + + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); + } + } + + // De-allocate data not needed anymore and take up a lot of RAM + matches.clear(); matches = MATCH_MAP(); + keypoint_map.clear(); keypoint_map.shrink_to_fit(); + cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); + + { + // Build tracks + // De-allocate these as soon as not needed 
to save memory + openMVG::tracks::TracksBuilder trackBuilder; + trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences + trackBuilder.Filter(); // Filter: Remove tracks that have conflict + // trackBuilder.ExportToStream(std::cout); + // Export tracks as a map (each entry is a sequence of imageId and featureIndex): + // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} + openMVG::tracks::STLMAPTracks map_tracks; + trackBuilder.ExportToSTL(map_tracks); + match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed + trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it + + if (map_tracks.empty()) + LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; + + // Populate the filtered tracks + size_t num_elems = map_tracks.size(); + pid_to_cid_fid.resize(num_elems); + size_t curr_id = 0; + for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { + for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { + pid_to_cid_fid[curr_id][itr2->first] = itr2->second; + } + curr_id++; + } + } + + return; +} + } // end namespace dense_map diff --git a/dense_map/geometry_mapper/src/texture_processing.cc b/dense_map/geometry_mapper/src/texture_processing.cc index 339f86e3..8a7bb1bb 100644 --- a/dense_map/geometry_mapper/src/texture_processing.cc +++ b/dense_map/geometry_mapper/src/texture_processing.cc @@ -55,6 +55,14 @@ #include #include +// TODO(oalexan1): Consider applying TILE_PADDING not to at the atlas +// forming stage, when the patch is padded, but earlier, right when +// the patch is created from a mesh triangle. That will ensure that +// while the same overall buffer is used, the padding region will have +// actual image samples, which may improve the rendering in meshlab +// when one zooms out, where now for some reason the triangle edges +// show up. Can test the output with the test_texture_gen tool. 
+ namespace dense_map { // Use int64_t values as much as possible when it comes to dealing with @@ -64,7 +72,7 @@ const int64_t NUM_CHANNELS = 4; // BRGA const int64_t TILE_PADDING = 4; IsaacTextureAtlas::IsaacTextureAtlas(int64_t width, int64_t height) - : width(width), height(height), determined_height(0), padding(TILE_PADDING), finalized(false) { + : width(width), height(height), determined_height(0), finalized(false) { bin = RectangularBin::create(width, height); // Do not create the image buffer yet, we don't need it until after its precise @@ -102,8 +110,8 @@ bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { assert(bin != NULL); - int64_t width = texture_patch->get_width() + 2 * padding; - int64_t height = texture_patch->get_height() + 2 * padding; + int64_t width = texture_patch->get_width() + 2 * TILE_PADDING; + int64_t height = texture_patch->get_height() + 2 * TILE_PADDING; Rect rect = Rect(0, 0, width, height); if (!bin->insert(&rect)) return false; @@ -112,22 +120,22 @@ bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { // Note how we don't bother to do any copying, as at this stage we // only care for the structure - // copy_into(patch_image, rect.min_x, rect.min_y, image, padding); + // copy_into(patch_image, rect.min_x, rect.min_y, image, TILE_PADDING); IsaacTexturePatch::Faces const& patch_faces = texture_patch->get_faces(); // alias IsaacTexturePatch::Texcoords const& patch_texcoords = texture_patch->get_texcoords(); // alias // Calculate where the patch will go after insertion - math::Vec2f offset = math::Vec2f(rect.min_x + padding, rect.min_y + padding); + Eigen::Vector2d offset = Eigen::Vector2d(rect.min_x + TILE_PADDING, rect.min_y + TILE_PADDING); faces.insert(faces.end(), patch_faces.begin(), patch_faces.end()); // Insert the texcoords in the right place for (std::size_t i = 0; i < patch_faces.size(); i++) { for (int64_t j = 0; j < 3; j++) { - math::Vec2f rel_texcoord(patch_texcoords[i * 3 + 
j]); + Eigen::Vector2d rel_texcoord(patch_texcoords[i * 3 + j]); - math::Vec2f texcoord = rel_texcoord + offset; + Eigen::Vector2d texcoord = rel_texcoord + offset; // Delay this normalization until we know the final width and // height of the atlas. @@ -142,12 +150,12 @@ bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { } struct VectorCompare { - bool operator()(math::Vec2f const& lhs, math::Vec2f const& rhs) const { + bool operator()(Eigen::Vector2d const& lhs, Eigen::Vector2d const& rhs) const { return lhs[0] < rhs[0] || (lhs[0] == rhs[0] && lhs[1] < rhs[1]); } }; -typedef std::map TexcoordMap; +typedef std::map TexcoordMap; void IsaacTextureAtlas::merge_texcoords() { Texcoords tmp; @@ -155,7 +163,7 @@ void IsaacTextureAtlas::merge_texcoords() { // Do not remove duplicates, as that messes up the book-keeping TexcoordMap texcoord_map; - for (math::Vec2f const& texcoord : tmp) { + for (Eigen::Vector2d const& texcoord : tmp) { TexcoordMap::iterator iter = texcoord_map.find(texcoord); // if (iter == texcoord_map.end()) { std::size_t texcoord_id = this->texcoords.size(); @@ -170,10 +178,11 @@ void IsaacTextureAtlas::merge_texcoords() { // Set the final height by removing unused space. Allocate the image buffer. void IsaacTextureAtlas::resize_atlas() { - // Round up a little to make it a multiple of 2 * padding. Not strictly necessary + // Round up a little to make it a multiple of 2 * TILE_PADDING. Not strictly necessary // but looks nicer that way. 
+ int64_t factor = 2 * TILE_PADDING; this->determined_height - = (2 * padding) * (ceil(this->determined_height / static_cast(2 * padding))); + = factor * ceil(this->determined_height / static_cast(factor)); this->determined_height = std::min(this->height, this->determined_height); this->height = this->determined_height; @@ -200,18 +209,18 @@ void IsaacTextureAtlas::finalize() { void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vector const& texture_atlases, - ObjModel* obj_model) { + IsaacObjModel* obj_model) { mve::TriangleMesh::VertexList const& mesh_vertices = mesh->get_vertices(); mve::TriangleMesh::NormalList const& mesh_normals = mesh->get_vertex_normals(); mve::TriangleMesh::FaceList const& mesh_faces = mesh->get_faces(); - ObjModel::Vertices& vertices = obj_model->get_vertices(); + IsaacObjModel::Vertices& vertices = obj_model->get_vertices(); vertices.insert(vertices.begin(), mesh_vertices.begin(), mesh_vertices.end()); - ObjModel::Normals& normals = obj_model->get_normals(); + IsaacObjModel::Normals& normals = obj_model->get_normals(); normals.insert(normals.begin(), mesh_normals.begin(), mesh_normals.end()); - ObjModel::TexCoords& texcoords = obj_model->get_texcoords(); + IsaacObjModel::TexCoords& texcoords = obj_model->get_texcoords(); - ObjModel::Groups& groups = obj_model->get_groups(); + IsaacObjModel::Groups& groups = obj_model->get_groups(); MaterialLib& material_lib = obj_model->get_material_lib(); for (IsaacTextureAtlas::Ptr texture_atlas : texture_atlases) { @@ -221,8 +230,8 @@ void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, material.diffuse_map = texture_atlas->get_image(); material_lib.push_back(material); - groups.push_back(ObjModel::Group()); - ObjModel::Group& group = groups.back(); + groups.push_back(IsaacObjModel::Group()); + IsaacObjModel::Group& group = groups.back(); group.material_name = material.name; IsaacTextureAtlas::Faces const& atlas_faces = texture_atlas->get_faces(); @@ -244,8 +253,8 @@ void 
isaac_build_model(mve::TriangleMesh::ConstPtr mesh, texcoord_id_offset + atlas_texcoord_ids[i * 3 + 1], texcoord_id_offset + atlas_texcoord_ids[i * 3 + 2]}; - group.faces.push_back(ObjModel::Face()); - ObjModel::Face& face = group.faces.back(); + group.faces.push_back(IsaacObjModel::Face()); + IsaacObjModel::Face& face = group.faces.back(); std::copy(vertex_ids, vertex_ids + 3, face.vertex_ids); std::copy(texcoord_ids, texcoord_ids + 3, face.texcoord_ids); std::copy(normal_ids, normal_ids + 3, face.normal_ids); @@ -292,29 +301,46 @@ math::Vec3f eigen_to_vec3f(Eigen::Vector3d const& V) { return v; } -void calculate_texture_size(double height_factor, std::list const& texture_patches, +void calculate_texture_size(double height_factor, + std::list const& texture_patches, int64_t& texture_width, int64_t& texture_height) { int64_t total_area = 0; // this can be huge and may overflow with 32 bit int - int64_t max_width = 0; - int64_t max_height = 0; - int64_t padding = TILE_PADDING; - + int64_t max_patch_width = 0; + int64_t max_patch_height = 0; for (IsaacTexturePatch::ConstPtr texture_patch : texture_patches) { - int64_t width = texture_patch->get_width() + 2 * padding; - int64_t height = texture_patch->get_height() + 2 * padding; + int64_t width = texture_patch->get_width() + 2 * TILE_PADDING; + int64_t height = texture_patch->get_height() + 2 * TILE_PADDING; - max_width = std::max(max_width, width); - max_height = std::max(max_height, height); + max_patch_width = std::max(max_patch_width, width); + max_patch_height = std::max(max_patch_height, height); int64_t area = width * height; total_area += area; } - texture_width = std::max(4L * 1024L, max_width); + // Estimate rough dimensions of the atlas. This can't be precise, + // since those little tiles may not fit with no gaps between + // them. The height will be first over-estimated later, and then + // re-adjusted in due time. 
+ + // It is good to aim for roughly square dimensions, as otherwise one + // dimension may be too big, which may result in loss of precision + // in texcoords as those get scaled by image dimensions. + texture_width = ceil(sqrt(static_cast(total_area))); texture_height = ceil(static_cast(total_area / texture_width)); - texture_height = std::max(texture_height, max_height); - // This is a heuristic but it works well-enough the first time + // Adjust to ensure even the biggest patch can fit + texture_width = std::max(texture_width, max_patch_width); + texture_height = std::max(texture_height, max_patch_height); + + // Round up a little to make the dims a multiple of 2 * + // TILE_PADDING. Not strictly necessary but looks nicer that way. + int64_t factor = 2 * TILE_PADDING; + texture_width = factor * ceil(texture_width / static_cast(factor)); + texture_height = factor * ceil(texture_height / static_cast(factor)); + + // Give the height a margin to overestimate it. It will be adjusted after we finish + // putting all patches in the atlas. 
texture_height *= height_factor; } @@ -366,13 +392,13 @@ void merge_texture_atlases(std::vector* texture_atlases) dest_coords.clear(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { IsaacTextureAtlas::Texcoords& prev_coords = prev_atlases[atlas_it]->get_texcoords(); - math::Vec2f offset = math::Vec2f(0, texcoord_start); + Eigen::Vector2d offset = Eigen::Vector2d(0, texcoord_start); IsaacTextureAtlas::Faces& prev_faces = prev_atlases[atlas_it]->get_faces(); for (size_t face_it = 0; face_it < prev_faces.size(); face_it++) { for (int64_t j = 0; j < 3; j++) { - math::Vec2f rel_texcoord(prev_coords[face_it * 3 + j]); - math::Vec2f texcoord = rel_texcoord + offset; + Eigen::Vector2d rel_texcoord(prev_coords[face_it * 3 + j]); + Eigen::Vector2d texcoord = rel_texcoord + offset; dest_coords.push_back(texcoord); } } @@ -465,13 +491,13 @@ void generate_texture_atlases(double height_factor, return; } -void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { +void isaac_save_model(IsaacObjModel* obj_model, std::string const& prefix) { int64_t OBJ_INDEX_OFFSET = 1; - ObjModel::Vertices const& vertices = obj_model->get_vertices(); - ObjModel::Normals const& normals = obj_model->get_normals(); - ObjModel::TexCoords const& texcoords = obj_model->get_texcoords(); - ObjModel::Groups const& groups = obj_model->get_groups(); + IsaacObjModel::Vertices const& vertices = obj_model->get_vertices(); + IsaacObjModel::Normals const& normals = obj_model->get_normals(); + IsaacObjModel::TexCoords const& texcoords = obj_model->get_texcoords(); + IsaacObjModel::Groups const& groups = obj_model->get_groups(); MaterialLib const& material_lib = obj_model->get_material_lib(); material_lib.save_to_files(prefix); @@ -480,31 +506,31 @@ void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { std::ofstream out((prefix + ".obj").c_str()); if (!out.good()) throw util::FileException(prefix + ".obj", std::strerror(errno)); - out << "mtllib " << name << 
".mtl" << '\n'; + out << "mtllib " << name << ".mtl" << "\n"; - out << std::fixed << std::setprecision(8); - for (std::size_t i = 0; i < vertices.size(); i++) { - out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << '\n'; - } + out << std::setprecision(16); + for (std::size_t i = 0; i < vertices.size(); i++) + out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << "\n"; - for (std::size_t i = 0; i < texcoords.size(); i++) { - out << "vt " << texcoords[i][0] << " " << 1.0f - texcoords[i][1] << '\n'; - } + // Here use 1.0 rather than 1.0f to not lose precision + for (std::size_t i = 0; i < texcoords.size(); i++) + out << "vt " << texcoords[i][0] << " " << 1.0 - texcoords[i][1] << "\n"; - for (std::size_t i = 0; i < normals.size(); i++) { - out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << '\n'; - } + for (std::size_t i = 0; i < normals.size(); i++) + out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << "\n"; for (std::size_t i = 0; i < groups.size(); i++) { - out << "usemtl " << groups[i].material_name << '\n'; + out << "usemtl " << groups[i].material_name << "\n"; for (std::size_t j = 0; j < groups[i].faces.size(); j++) { - ObjModel::Face const& face = groups[i].faces[j]; + IsaacObjModel::Face const& face = groups[i].faces[j]; out << "f"; for (std::size_t k = 0; k < 3; ++k) { - out << " " << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" - << face.normal_ids[k] + OBJ_INDEX_OFFSET; + out << " " + << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.normal_ids[k] + OBJ_INDEX_OFFSET; // NOLINT } - out << '\n'; + out << "\n"; } } out.close(); @@ -662,11 +688,13 @@ Triangle bias_triangle(Triangle const& tri, double bias) { void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads, // outputs - 
std::vector& face_projection_info, std::vector& texture_atlases, - tex::Model& model) { - omp_set_dynamic(0); // Explicitly disable dynamic teams - omp_set_num_threads(num_threads); // Use this many threads for all - // consecutive parallel regions + std::vector& face_projection_info, + std::vector& texture_atlases, + IsaacObjModel& model) { + // Explicitly disable dynamic determination of number of threads + omp_set_dynamic(0); + // Use this many threads for all consecutive parallel regions + omp_set_num_threads(num_threads); std::cout << "Forming the textured model, please wait ..." << std::endl; util::WallTimer timer; @@ -687,8 +715,9 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ // Store here texture coords before they get packed in the model. // Ensure that this is initialized. - ObjModel::TexCoords input_texcoords(3 * num_faces); - for (size_t it = 0; it < input_texcoords.size(); it++) input_texcoords[it] = math::Vec2f(0.0f, 0.0f); + std::vector input_texcoords(3 * num_faces); + for (size_t it = 0; it < input_texcoords.size(); it++) + input_texcoords[it] = Eigen::Vector2d(0.0, 0.0); std::vector texture_patches(num_faces); @@ -705,7 +734,7 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ std::vector patch_faces(1); patch_faces[0] = face_id; - std::vector face_texcoords(3); + std::vector face_texcoords(3); // Convert the triangle corners to Eigen so that we have double precision std::vector V(3); @@ -715,21 +744,6 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ double area = 0.5 * ((V[1] - V[0]).cross(V[2] - V[0])).norm(); total_area += area; -#if 0 - // TODO(oalexan1): Sort this out - if (area == 0.0) { - std::cout << "Bad vertex " << V[0].transpose() << std::endl; - // Ignore degenerate faces - face_projection_info[face_id].valid = false; - int64_t width = 0, height = 0; // later such patches will be ignored - for (size_t v_it = 0; v_it < 3; v_it++) - 
face_texcoords[v_it] = math::Vec2f(0.0f, 0.0f); // ensure this is initialized - texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords, - width, height); - continue; - } -#endif - // Compute the double precision normal, as we will do a lot of // careful math. It agrees with the pre-existing float normal. Eigen::Vector3d N = (V[1] - V[0]).cross(V[2] - V[0]); @@ -771,19 +785,20 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ } } - // Put a little padding just in case - int64_t padding = 1; - min_y -= padding * pixel_size; - max_y += padding * pixel_size; - min_z -= padding * pixel_size; - max_z += padding * pixel_size; + FaceInfo& F = face_projection_info[face_id]; // alias + + // Put a little padding just in case (its value is 1 in the constructor) + min_y -= F.face_info_padding * pixel_size; + max_y += F.face_info_padding * pixel_size; + min_z -= F.face_info_padding * pixel_size; + max_z += F.face_info_padding * pixel_size; int64_t width = ceil((max_y - min_y) / pixel_size) + 1; int64_t height = ceil((max_z - min_z) / pixel_size) + 1; // Record where the triangle vertices will be in the sampled bounding box of the face for (size_t v_it = 0; v_it < 3; v_it++) - face_texcoords[v_it] = math::Vec2f((TransformedVertices[v_it][1] - min_y) / pixel_size, + face_texcoords[v_it] = Eigen::Vector2d((TransformedVertices[v_it][1] - min_y) / pixel_size, (TransformedVertices[v_it][2] - min_z) / pixel_size); // Append these to the bigger list @@ -793,7 +808,6 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ // Store the info that we will need later to find where a little tile // should go. 
- FaceInfo& F = face_projection_info[face_id]; // alias F.x = x; F.min_y = min_y; F.min_z = min_z; @@ -801,7 +815,6 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ F.height = height; F.TransformedVertices = TransformedVertices; F.YZPlaneToTriangleFace = YZPlaneToTriangleFace; - F.padding = padding; // Form a little rectangle, which later will be inserted in the right place // in the texture atlas @@ -844,7 +857,7 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_ // These texcoords and the ones above are related by a scale transform + translation // that transform that can change depending on which atlas we are on. - ObjModel::TexCoords const& model_texcoords = model.get_texcoords(); + IsaacObjModel::TexCoords const& model_texcoords = model.get_texcoords(); // The two vectors below would be equal if we process the mesh fully, // and not so unless we stop early. @@ -906,19 +919,17 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vectorsecond)[0] << " " << (it->second)[1] << '\n'; - } + for (auto it = uv_map.begin(); it != uv_map.end(); it++) + out << "vt " << (it->second)[0] << " " << (it->second)[1] << "\n"; - for (std::size_t i = 0; i < mesh_normals.size(); i++) { + for (std::size_t i = 0; i < mesh_normals.size(); i++) out << "vn " << mesh_normals[i][0] << " " << mesh_normals[i][1] << " " - << mesh_normals[i][2] << '\n'; - } + << mesh_normals[i][2] << "\n"; int64_t OBJ_INDEX_OFFSET = 1; // have indices start from 1 for (std::size_t j = 0; j < face_vec.size(); j++) { @@ -928,39 +939,42 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& vertices = texture_model.get_vertices(); - std::vector const& texcoords = texture_model.get_texcoords(); + std::vector const& texcoords = texture_model.get_texcoords(); std::vector const& normals = texture_model.get_normals(); - std::vector const& groups = texture_model.get_groups(); + std::vector const& groups = 
texture_model.get_groups(); std::ostringstream out; out << "mtllib " << out_prefix << ".mtl\n"; - out << std::fixed << std::setprecision(8); + // Must have high precision, as otherwise for large .obj files a loss of precision + // will happen when the normalized texcoords are not saved with enough digits + // and then on loading are multiplied back by the large texture dimensions. + out << std::setprecision(16); for (std::size_t i = 0; i < vertices.size(); i++) - out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << '\n'; + out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << "\n"; + // Here use 1.0 rather than 1.0f to not lose precision for (std::size_t i = 0; i < texcoords.size(); i++) - out << "vt " << texcoords[i][0] << " " << 1.0f - texcoords[i][1] << '\n'; + out << "vt " << texcoords[i][0] << " " << 1.0 - texcoords[i][1] << "\n"; - for (std::size_t i = 0; i < normals.size(); i++) { - out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << '\n'; - } + for (std::size_t i = 0; i < normals.size(); i++) + out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << "\n"; int64_t OBJ_INDEX_OFFSET = 1; // have indices start from 1 for (std::size_t i = 0; i < groups.size(); i++) { - out << "usemtl " << groups[i].material_name << '\n'; + out << "usemtl " << groups[i].material_name << "\n"; for (std::size_t j = 0; j < groups[i].faces.size(); j++) { - ObjModel::Face const& face = groups[i].faces[j]; + IsaacObjModel::Face const& face = groups[i].faces[j]; out << "f"; for (std::size_t k = 0; k < 3; ++k) { out << " " // NOLINT @@ -968,7 +982,7 @@ void formObj(tex::Model& texture_model, std::string const& out_prefix, std::stri << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT << face.normal_ids[k] + OBJ_INDEX_OFFSET; // NOLINT } - out << '\n'; + out << "\n"; } } @@ -1182,13 +1196,16 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // 
Project texture using a texture model that was already pre-filled, so // just update pixel values -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, double pixel_size, int64_t num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture) { - omp_set_dynamic(0); // Explicitly disable dynamic teams - omp_set_num_threads(num_threads); // Use this many threads for all - // consecutive parallel regions + std::vector& texture_atlases, + IsaacObjModel& model, cv::Mat& out_texture) { + // Explicitly disable dynamic determination of number of threads + omp_set_dynamic(0); + // Use this many threads for all consecutive parallel regions + omp_set_num_threads(num_threads); util::WallTimer timer; @@ -1353,9 +1370,9 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // The vertices of the face transformed to a y-z plane (ignore the x) Triangle tri(TV[0][1], TV[0][2], TV[1][1], TV[1][2], TV[2][1], TV[2][2]); - // Apply a 1.5 * pixel_size bias * padding to ensure all the pixels around + // Apply a 1.5 * pixel_size bias * F.face_info_padding to ensure all the pixels around // the triangle are sampled well. 
- tri = bias_triangle(tri, 1.5 * pixel_size * F.padding); + tri = bias_triangle(tri, 1.5 * pixel_size * F.face_info_padding); for (int64_t iz = 0; iz < F.height; iz++) { double out_y0 = -1.0, out_y1 = -1.0; @@ -1366,7 +1383,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b int64_t min_iy = std::max(static_cast(floor((out_y0 - F.min_y) / pixel_size)), 0L); int64_t max_iy = - std::min(static_cast(ceil((out_y1 - F.min_y) / pixel_size)), static_cast(F.width) - 1L); + std::min(static_cast(ceil((out_y1 - F.min_y) / pixel_size)), + static_cast(F.width) - 1L); for (int64_t iy = min_iy; iy <= max_iy; iy++) { // The point in the plane in which we do the book-keeping @@ -1448,10 +1466,57 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // seconds\n"; } +// Intersect ray with a mesh. Return true on success. +bool ray_mesh_intersect(Eigen::Vector2d const& dist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection) { + // Initialize the output + intersection = Eigen::Vector3d(0.0, 0.0, 0.0); + + // Undistort the pixel + Eigen::Vector2d undist_centered_pix; + cam_params.Convert + (dist_pix, &undist_centered_pix); + + // Ray from camera going through the undistorted and centered pixel + Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam_params.GetFocalVector()[0], + undist_centered_pix.y() / cam_params.GetFocalVector()[1], 1.0); + cam_ray.normalize(); + + Eigen::Affine3d cam_to_world = world_to_cam.inverse(); + Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; + Eigen::Vector3d cam_ctr = cam_to_world.translation(); + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + + 
bvh_ray.tmin = min_ray_dist; + bvh_ray.tmax = max_ray_dist; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (bvh_tree->intersect(bvh_ray, &hit)) { + double cam_to_mesh_dist = hit.t; + intersection = cam_ctr + cam_to_mesh_dist * world_ray; + return true; + } + + return false; +} + // Project and save a mesh as an obj file to out_prefix.obj, // out_prefix.mtl, out_prefix.png. -void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, cv::Mat const& image, - Eigen::Affine3d const& world_to_cam, camera::CameraParameters const& cam_params, +void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + cv::Mat const& image, Eigen::Affine3d const& world_to_cam, + camera::CameraParameters const& cam_params, int64_t num_exclude_boundary_pixels, std::string const& out_prefix) { // Create the output directory, if needed std::string out_dir = boost::filesystem::path(out_prefix).parent_path().string(); diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index d5e5dc2c..84fb1855 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -17,18 +17,72 @@ * under the License. */ -// TODO(oalexan1): Move these OpenMVG headers to dense_map_utils.cc, -// and do forward declarations in dense_map_utils.h. Get rid of -// warnings beyond our control -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic push -#include -#include -#include -#include -#include -#pragma GCC diagnostic pop +// The algorithm: + +// We assume our camera rig has n camera types. Each can be image or +// depth + image. Just one camera must be the reference camera. In +// this code that will be nav_cam. + +// We assume we know the precise time every camera image is acquired. 
+// Every non-ref camera will be bracketed by two ref cameras very +// close in time. Hence, given the two bracketing ref camera poses, +// the ref cam pose will be interpolated at the time a non-ref camera +// is measured. This allows one to model the transform between +// the ref camera and every other camera on the rig. + +// The variables to be optimized will be the pose of each ref camera, +// and the transforms from the ref camera to every other camera type +// (the extrinsics), with these transforms independent of time as the +// rig is rigid. Also optimized are the intrinsics of each camera, and +// the transform from each depth camera's cloud coordinates to its +// image coordinates (it is a transform very close to the identity but +// not quite, and a scale factor may be present). + +// One component of the cost function to minimize measures the +// reprojection error in each camera, from each triangulated point in +// world coordinates. A second one measures the error between a +// triangulated point and corresponding depth measurement at that +// pixel, when applicable, with appropriate transforms applied to +// bring the depth measurement to world coordinates. This second +// error's strength is controlled by depth_tri_weight. + +// Optionally, one can constrain that the triangulated points +// intersect close to a preexisting mesh, representing the surface +// being scanned with the rig given a previous estimation of all +// the camera poses. That mesh is computed using the geometry mapper. +// One also can control how close the depth camera clouds are to this +// mesh. The flags for this are mesh_tri_weight and depth_tri_weight, +// and can be set to 0 if desired not to use them. + +// These mesh constraints bring in additional information, +// particularly for the cameras lacking depth, and help get the focal +// lengths correctly. 
+ +// If different camera sensors are on different CPUs, and a time +// offset exists among their clocks, this program can model that, +// and also float those offsets, if desired. + +// The initial ref camera poses are computed using SfM, with the +// Astrobee build_map tool. The obtained "sparse map" of poses must +// be registered to world coordinates, to get the world scale correctly. +// The sparse map can be fixed or further refined in this tool. + +// The initial extrinsics are assumed known, and are refined by this +// tool. Likely SfM can be used to get an initial value of the +// extrinsics, but for that may need to use the Theia tool which can +// do SfM with cameras acquired with different sensors. + +// Every camera object (struct cameraImage) can look up its type, +// timestamp, timestamps and indices of bracketing cameras, image topic, +// depth topic (if present), ref_to_cam_timestamp_offset, and +// ref_to_cam_transform (extrinsics). A camera object also stores its +// image and depth cloud. + +// For every instance of a reference camera its +// ref_to_cam_timestamp_offset is 0 and kept fixed, +// ref_to_cam_transform (extrinsics) is the identity and kept fixed, +// and the indices pointing to the left and right ref bracketing +// cameras are identical. 
#include #include @@ -74,6 +128,7 @@ #include #include #include +#include #include #include @@ -211,8 +266,6 @@ DEFINE_double(refiner_min_angle, 0.5, "If filtering outliers, remove triangulate "Note that some cameras in the rig may be very close to each other relative to " "the triangulated points, so care is needed here."); -DEFINE_bool(refiner_skip_filtering, false, "Do not do any outlier filtering."); - DEFINE_string(out_texture_dir, "", "If non-empty and if an input mesh was provided, " "project the camera images using the optimized poses onto the mesh " "and write the obtained .obj files in the given directory."); @@ -244,7 +297,8 @@ DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables "less than this."); DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); - +DEFINE_int32(num_match_threads, 8, "How many threads to use in feature detection/matching. " + "A large number can use a lot of memory."); DEFINE_string(sci_cam_timestamps, "", "Use only these sci cam timestamps. Must be a file with one timestamp per line."); @@ -254,7 +308,13 @@ DEFINE_bool(verbose, false, namespace dense_map { -// Calculate interpolated world to camera trans +// Calculate interpolated world to camera transform. Use the +// convention that if beg_ref_stamp == end_ref_stamp, then this is the +// reference camera, and then only beg_world_to_ref_t is used, while +// end_world_to_ref_t is undefined. For the reference camera it is +// also expected that ref_to_cam_aff is the identity. This saves some +// code duplication later as the ref cam need not be treated +// separately. 
Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, const double* end_world_to_ref_t, const double* ref_to_cam_trans, @@ -265,30 +325,33 @@ Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, Eigen::Affine3d beg_world_to_ref_aff; array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); - Eigen::Affine3d end_world_to_ref_aff; - array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); + Eigen::Affine3d interp_world_to_cam_aff; + if (beg_ref_stamp == end_ref_stamp) { + interp_world_to_cam_aff = beg_world_to_ref_aff; + } else { + Eigen::Affine3d end_world_to_ref_aff; + array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); - Eigen::Affine3d ref_to_cam_aff; - array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); + Eigen::Affine3d ref_to_cam_aff; + array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); - // Covert from cam time to ref time and normalize. It is very - // important that below we subtract the big numbers from each - // other first, which are the timestamps, then subtract whatever - // else is necessary. Otherwise we get problems with numerical - // precision with CERES. - double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) - / (end_ref_stamp - beg_ref_stamp); + // Covert from cam time to ref time and normalize. It is very + // important that below we subtract the big numbers from each + // other first, which are the timestamps, then subtract whatever + // else is necessary. Otherwise we get problems with numerical + // precision with CERES. + // Note how the denominator never becomes 0. 
+ double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) + / (end_ref_stamp - beg_ref_stamp); - if (beg_ref_stamp == end_ref_stamp) - alpha = 0.0; // handle division by zero + if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; - if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; + // Interpolate at desired time + Eigen::Affine3d interp_world_to_ref_aff = dense_map::linearInterp(alpha, beg_world_to_ref_aff, + end_world_to_ref_aff); - // Interpolate at desired time - Eigen::Affine3d interp_world_to_ref_aff - = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); - - Eigen::Affine3d interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff; + interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff; + } return interp_world_to_cam_aff; } @@ -413,97 +476,6 @@ struct BracketedCamError { int m_num_focal_lengths; }; // End class BracketedCamError -// An error function minimizing the error of projecting an xyz point -// into a reference camera. Bracketing, timestamps, and transform to -// ref cam are not needed. -struct RefCamError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefCamError(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params): - m_meas_dist_pix(meas_dist_pix), - m_block_sizes(block_sizes), - m_cam_params(cam_params), - m_num_focal_lengths(1) { - // Sanity check - if (m_block_sizes.size() != 5 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_XYZ_PARAMS || - m_block_sizes[2] != m_num_focal_lengths || - m_block_sizes[3] != NUM_OPT_CTR_PARAMS || - m_block_sizes[4] != 1 // This will be overwritten shortly - ) { - LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; - } - - // Set the correct distortion size. This cannot be done in the interface for now. 
- m_block_sizes[4] = m_cam_params.GetDistortion().size(); - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - Eigen::Affine3d world_to_ref_t; - array_to_rigid_transform(world_to_ref_t, parameters[0]); - - // World point - Eigen::Vector3d X; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; - - // Make a deep copy which we will modify - camera::CameraParameters cam_params = m_cam_params; - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); - Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); - Eigen::VectorXd distortion(m_block_sizes[4]); - for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; - cam_params.SetFocalLength(focal_vector); - cam_params.SetOpticalOffset(optical_center); - cam_params.SetDistortion(distortion); - - // Convert world point to given cam coordinates - X = world_to_ref_t * X; - - // Project into the image - Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - Eigen::Vector2d curr_dist_pix; - cam_params.Convert(undist_pix, &curr_dist_pix); - - // Compute the residuals - residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; - residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* - Create(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new RefCamError(meas_dist_pix, block_sizes, cam_params)); - - // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_PIX_PARAMS); - - // The camera wrapper knows all of the block sizes to add, except - // for distortion, which is last - for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 - cost_function->AddParameterBlock(block_sizes[i]); - - // The distortion block size is added separately as it is variable - cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - - return cost_function; - } - - private: - Eigen::Vector2d m_meas_dist_pix; - std::vector m_block_sizes; - camera::CameraParameters m_cam_params; - int m_num_focal_lengths; -}; // End class RefCamError - // An error function minimizing the product of a given weight and the // error between a triangulated point and a measured depth point. The // depth point needs to be transformed to world coordinates first. For @@ -693,82 +665,6 @@ struct BracketedDepthMeshError { std::vector m_block_sizes; }; // End class BracketedDepthMeshError -// An error function minimizing the product of a given weight and the -// error between a triangulated point and a measured depth point for -// the ref camera. The depth point needs to be transformed to world -// coordinates first. -struct RefDepthError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 4 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - (m_block_sizes[1] != NUM_RIGID_PARAMS && m_block_sizes[1] != NUM_AFFINE_PARAMS) || - m_block_sizes[2] != NUM_SCALAR_PARAMS || - m_block_sizes[3] != NUM_XYZ_PARAMS) { - LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. 
- bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam; - array_to_rigid_transform(world_to_cam, parameters[0]); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - if (m_block_sizes[1] == NUM_AFFINE_PARAMS) - array_to_affine_transform(depth_to_image, parameters[1]); - else - array_to_rigid_transform(depth_to_image, parameters[1]); - - // Apply the scale - double depth_to_image_scale = parameters[2][0]; - depth_to_image.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // Convert to world coordinates - M = world_to_cam.inverse() * M; - - // Triangulated world point - Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (X[it] - M[it]); - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, - std::vector const& block_sizes) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new RefDepthError(weight, meas_depth_xyz, block_sizes)); - - cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - for (size_t i = 0; i < block_sizes.size(); i++) - cost_function->AddParameterBlock(block_sizes[i]); - - return cost_function; - } - - private: - double m_weight; // How much weight to give to this constraint - Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement - std::vector m_block_sizes; -}; // End class RefDepthError - // An error function minimizing a weight times the distance from a // variable xyz point to a fixed reference xyz point. 
struct XYZError { @@ -854,433 +750,235 @@ void calc_median_residuals(std::vector const& residuals, } } - typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; - - // A class to encompass all known information about a camera - // This is work in progress and will replace some of the logic further down. - struct cameraImage { - // An index to look up the type of camera. This will equal the - // value ref_camera_type if and only if this is a reference - // camera. - int camera_type; - - // The timestamp for this camera (in floating point seconds since epoch) - // measured by the clock/cpu which is particular to this camera. - double timestamp; - - // The timestamp with an adjustment added to it to be in - // reference camera time - double ref_timestamp; - - // The timestamp of the closest cloud for this image, measured - // with the same clock as the 'timestamp' value. - double cloud_timestamp; - - // Indices to look up the reference cameras bracketing this camera - // in time. The two indices will have same value if and only if - // this is a reference camera. - int beg_ref_index; - int end_ref_index; - - // The image for this camera, in grayscale - cv::Mat image; - - // The corresponding depth cloud, for an image + depth camera. - // Will be empty and uninitialized for a camera lacking depth. - cv::Mat depth_cloud; - }; - - // Sort by timestamps adjusted to be relative to the ref camera clock - bool timestampLess(cameraImage i, cameraImage j) { - return (i.ref_timestamp < j.ref_timestamp); - } +// Sort by timestamps adjusted to be relative to the ref camera clock +bool timestampLess(cameraImage i, cameraImage j) { + return (i.ref_timestamp < j.ref_timestamp); +} - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. 
- bool depthValue( // Inputs - cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, - // Output - Eigen::Vector3d& depth_xyz) { - depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize +// Find the haz cam depth measurement. Use nearest neighbor interpolation +// to look into the depth cloud. +bool depthValue( // Inputs + cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, + // Output + Eigen::Vector3d& depth_xyz) { + depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize - if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud + if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud - int col = round(dist_ip[0]); - int row = round(dist_ip[1]); + int col = round(dist_ip[0]); + int row = round(dist_ip[1]); - if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) - LOG(FATAL) << "Out of range in depth cloud."; + if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) + LOG(FATAL) << "Out of range in depth cloud."; + + // After rounding one may hit the bound + if (col == depth_cloud.cols || row == depth_cloud.rows) + return false; - // After rounding one may hit the bound - if (col == depth_cloud.cols || row == depth_cloud.rows) - return false; + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); - cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) + return false; - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) - return false; + depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); - depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + return true; +} - return true; +// Project given images with optimized cameras onto mesh. It is +// assumed that the most up-to-date cameras were copied/interpolated +// form the optimizer structures into the world_to_cam vector. 
+void meshProjectCameras(std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& cam_images, + std::vector const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + int ref_camera_type, int nav_cam_num_exclude_boundary_pixels, + std::string const& out_dir) { + if (cam_names.size() != cam_params.size()) + LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n"; + if (cam_images.size() != world_to_cam.size()) + LOG(FATAL) << "There must be as many camera images as camera poses.\n"; + if (out_dir.empty()) + LOG(FATAL) << "The output directory is empty.\n"; + + char filename_buffer[1000]; + + for (size_t cid = 0; cid < cam_images.size(); cid++) { + double timestamp = cam_images[cid].timestamp; + int cam_type = cam_images[cid].camera_type; + + int num_exclude_boundary_pixels = 0; + if (cam_type == ref_camera_type) + num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels; + + // Must use the 10.7f format for the timestamp as everywhere else in the code + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s", + out_dir.c_str(), timestamp, cam_names[cam_type].c_str()); + std::string out_prefix = filename_buffer; // convert to string + + std::cout << "Creating texture for: " << out_prefix << std::endl; + meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type], + num_exclude_boundary_pixels, out_prefix); } +} - // Project given images with optimized cameras onto mesh. It is - // assumed that the most up-to-date cameras were copied/interpolated - // form the optimizer structures into the world_to_cam vector. 
- void meshProjectCameras(std::vector const& cam_names, - std::vector const& cam_params, - std::vector const& cam_images, - std::vector const& world_to_cam, - mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - int ref_camera_type, int nav_cam_num_exclude_boundary_pixels, - std::string const& out_dir) { - if (cam_names.size() != cam_params.size()) - LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n"; - if (cam_images.size() != world_to_cam.size()) - LOG(FATAL) << "There must be as many camera images as camera poses.\n"; - if (out_dir.empty()) - LOG(FATAL) << "The output directory is empty.\n"; - - char filename_buffer[1000]; - - for (size_t cid = 0; cid < cam_images.size(); cid++) { - double timestamp = cam_images[cid].timestamp; - int cam_type = cam_images[cid].camera_type; - - int num_exclude_boundary_pixels = 0; - if (cam_type == ref_camera_type) - num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels; - - // Must use the 10.7f format for the timestamp as everywhere else in the code - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s", - out_dir.c_str(), timestamp, cam_names[cam_type].c_str()); - std::string out_prefix = filename_buffer; // convert to string - - std::cout << "Creating texture for: " << out_prefix << std::endl; - meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type], - num_exclude_boundary_pixels, out_prefix); - } - } +// Rebuild the map. +// TODO(oalexan1): This must be integrated in astrobee. +void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data + camera::CameraParameters const& cam_params, + boost::shared_ptr sparse_map) { + std::string rebuild_detector = "SURF"; + std::cout << "Rebuilding map with " << rebuild_detector << " detector."; - // Rebuild the map. - // TODO(oalexan1): This must be integrated in astrobee. 
- void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data - camera::CameraParameters const& cam_params, - boost::shared_ptr sparse_map) { - std::string rebuild_detector = "SURF"; - std::cout << "Rebuilding map with " << rebuild_detector << " detector."; - - // Set programatically the command line option for astrobee's map - // building min angle based on the corresponding refiner flag. - std::ostringstream oss; - oss << FLAGS_refiner_min_angle; - std::string min_valid_angle = oss.str(); - google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); - if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) - LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " - << "map-building software.\n"; - // The newline below is due to the sparse map software not putting a newline - std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; - - // Copy some data to make sure it does not get lost on resetting things below - std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; - std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; - - // Ensure the new camera parameters are set - sparse_map->SetCameraParameters(cam_params); - - std::cout << "Detecting features."; - sparse_map->DetectFeatures(); - - std::cout << "Matching features."; - // Borrow from the original map which images should be matched with which. 
- sparse_map->cid_to_cid_.clear(); - for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { - std::map const& track = pid_to_cid_fid[p]; // alias - for (std::map::const_iterator it1 = track.begin(); - it1 != track.end() ; it1++) { - for (std::map::const_iterator it2 = it1; - it2 != track.end() ; it2++) { - if (it1->first != it2->first) { - // Never match an image with itself - sparse_map->cid_to_cid_[it1->first].insert(it2->first); - } + // Set programatically the command line option for astrobee's map + // building min angle based on the corresponding refiner flag. + std::ostringstream oss; + oss << FLAGS_refiner_min_angle; + std::string min_valid_angle = oss.str(); + google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); + if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) + LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " + << "map-building software.\n"; + // The newline below is due to the sparse map software not putting a newline + std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; + + // Copy some data to make sure it does not get lost on resetting things below + std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; + std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; + + // Ensure the new camera parameters are set + sparse_map->SetCameraParameters(cam_params); + + std::cout << "Detecting features."; + sparse_map->DetectFeatures(); + + std::cout << "Matching features."; + // Borrow from the original map which images should be matched with which. 
+ sparse_map->cid_to_cid_.clear(); + for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { + std::map const& track = pid_to_cid_fid[p]; // alias + for (std::map::const_iterator it1 = track.begin(); + it1 != track.end() ; it1++) { + for (std::map::const_iterator it2 = it1; + it2 != track.end() ; it2++) { + if (it1->first != it2->first) { + // Never match an image with itself + sparse_map->cid_to_cid_[it1->first].insert(it2->first); } } } - - sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), - sparse_mapping::MatchesFile(map_file), sparse_map.get()); - for (size_t i = 0; i < world_to_ref_t.size(); i++) - sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); - - // Wipe file that is no longer needed - try { - std::remove(sparse_mapping::EssentialFile(map_file).c_str()); - }catch(...) {} - - std::cout << "Building tracks."; - bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff - sparse_mapping::BuildTracks(rm_invalid_xyz, - sparse_mapping::MatchesFile(map_file), - sparse_map.get()); - - // It is essential that during re-building we do not vary the - // cameras. Those were usually computed with a lot of SURF features, - // while rebuilding is usually done with many fewer ORGBRISK - // features. - bool fix_cameras = true; - if (fix_cameras) - std::cout << "Performing bundle adjustment with fixed cameras."; - else - std::cout << "Performing bundle adjustment while floating cameras."; - - sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); - } - - // TODO(oalexan1): Move this to utils. - // Intersect ray with mesh. Return true on success. 
- bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, - camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, - mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - double min_ray_dist, double max_ray_dist, - // Output - Eigen::Vector3d& intersection) { - // Initialize the output - intersection = Eigen::Vector3d(0.0, 0.0, 0.0); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], - undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); - cam_ray.normalize(); - - Eigen::Affine3d cam_to_world = world_to_cam.inverse(); - Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; - Eigen::Vector3d cam_ctr = cam_to_world.translation(); - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - - bvh_ray.tmin = min_ray_dist; - bvh_ray.tmax = max_ray_dist; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (bvh_tree->intersect(bvh_ray, &hit)) { - double cam_to_mesh_dist = hit.t; - intersection = cam_ctr + cam_to_mesh_dist * world_ray; - return true; - } - - return false; } - // Compute the transforms from the world to every camera, using pose interpolation - // if necessary. 
- void calc_world_to_cam_transforms( // Inputs - std::vector const& cams, std::vector const& world_to_ref_vec, - std::vector const& ref_timestamps, std::vector const& ref_to_cam_vec, - std::vector const& ref_to_cam_timestamp_offsets, - // Output - std::vector& world_to_cam) { - if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) - LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; - if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) - LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; - - world_to_cam.resize(cams.size()); - - for (size_t it = 0; it < cams.size(); it++) { - int beg_index = cams[it].beg_ref_index; - int end_index = cams[it].end_ref_index; - int cam_type = cams[it].camera_type; - world_to_cam[it] = dense_map::calc_world_to_cam_trans - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - ref_timestamps[beg_index], ref_timestamps[end_index], - ref_to_cam_timestamp_offsets[cam_type], - cams[it].timestamp); - } - return; - } - - // Match features while assuming that the input cameras can be used to filter out - // outliers by reprojection error. 
- void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, - camera::CameraParameters const& left_params, - camera::CameraParameters const& right_params, - Eigen::Affine3d const& left_world_to_cam, - Eigen::Affine3d const& right_world_to_cam, - double reprojection_error, - cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, - Eigen::Matrix2Xd const& left_keypoints, - Eigen::Matrix2Xd const& right_keypoints, - bool verbose, - // output - MATCH_PAIR* matches) { - // Match by using descriptors first - std::vector cv_matches; - interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); - - // Do filtering - std::vector left_vec; - std::vector right_vec; - std::vector filtered_cv_matches; - for (size_t j = 0; j < cv_matches.size(); j++) { - int left_ip_index = cv_matches.at(j).queryIdx; - int right_ip_index = cv_matches.at(j).trainIdx; - - Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], - left_keypoints.col(left_ip_index)[1]); - - Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], - right_keypoints.col(right_ip_index)[1]); - - Eigen::Vector2d undist_left_ip; - Eigen::Vector2d undist_right_ip; - left_params.Convert - (dist_left_ip, &undist_left_ip); - right_params.Convert - (dist_right_ip, &undist_right_ip); - - Eigen::Vector3d X = - dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), - left_world_to_cam, right_world_to_cam, - undist_left_ip, undist_right_ip); - - // Project back into the cameras - Eigen::Vector3d left_cam_X = left_world_to_cam * X; - Eigen::Vector2d undist_left_pix - = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); - Eigen::Vector2d dist_left_pix; - left_params.Convert(undist_left_pix, - &dist_left_pix); - - Eigen::Vector3d right_cam_X = right_world_to_cam * X; - Eigen::Vector2d undist_right_pix - = right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); - 
Eigen::Vector2d dist_right_pix; - right_params.Convert(undist_right_pix, - &dist_right_pix); - - // Filter out points whose reprojection error is too big - bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && - (dist_right_ip - dist_right_pix).norm() <= reprojection_error); - - // If any values above are Inf or NaN, is_good will be false as well - if (!is_good) continue; - - // Get the keypoints from the good matches - left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], - left_keypoints.col(left_ip_index)[1])); - right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], - right_keypoints.col(right_ip_index)[1])); - - filtered_cv_matches.push_back(cv_matches[j]); - } - - if (left_vec.empty()) return; - - // Filter using geometry constraints - // These may need some tweaking but works reasonably well. - double ransacReprojThreshold = 20.0; - cv::Mat inlier_mask; - int maxIters = 10000; - double confidence = 0.8; - - // affine2D works better than homography - // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, - // ransacReprojThreshold, inlier_mask, maxIters, confidence); - cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, - ransacReprojThreshold, maxIters, confidence); - - std::vector left_ip, right_ip; - for (size_t j = 0; j < filtered_cv_matches.size(); j++) { - int left_ip_index = filtered_cv_matches.at(j).queryIdx; - int right_ip_index = filtered_cv_matches.at(j).trainIdx; - - if (inlier_mask.at(j, 0) == 0) continue; - - cv::Mat left_desc = left_descriptors.row(left_ip_index); - cv::Mat right_desc = right_descriptors.row(right_ip_index); - - InterestPoint left; - left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); - - InterestPoint right; - right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); - - left_ip.push_back(left); - right_ip.push_back(right); - } - - // Update the shared variable using a lock - match_mutex->lock(); + 
sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), + sparse_mapping::MatchesFile(map_file), sparse_map.get()); + for (size_t i = 0; i < world_to_ref_t.size(); i++) + sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); + + // Wipe file that is no longer needed + try { + std::remove(sparse_mapping::EssentialFile(map_file).c_str()); + }catch(...) {} + + std::cout << "Building tracks."; + bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff + sparse_mapping::BuildTracks(rm_invalid_xyz, + sparse_mapping::MatchesFile(map_file), + sparse_map.get()); + + // It is essential that during re-building we do not vary the + // cameras. Those were usually computed with a lot of SURF features, + // while rebuilding is usually done with many fewer ORGBRISK + // features. + bool fix_cameras = true; + if (fix_cameras) + std::cout << "Performing bundle adjustment with fixed cameras."; + else + std::cout << "Performing bundle adjustment while floating cameras."; - // Print the verbose message inside the lock, otherwise the text - // may get messed up. - if (verbose) - std::cout << "Number of matches for pair " - << left_image_index << ' ' << right_image_index << ": " - << left_ip.size() << std::endl; + sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); +} - *matches = std::make_pair(left_ip, right_ip); - match_mutex->unlock(); +// Compute the transforms from the world to every camera, using pose interpolation +// if necessary. 
+void calc_world_to_cam_transforms( // Inputs + std::vector const& cams, + std::vector const& world_to_ref_vec, + std::vector const& ref_timestamps, + std::vector const& ref_to_cam_vec, + std::vector const& ref_to_cam_timestamp_offsets, + // Output + std::vector& world_to_cam) { + if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) + LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; + if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) + LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; + + world_to_cam.resize(cams.size()); + + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); } + return; +} - // Find if a given feature is an inlier, and take care to check that - // the bookkeeping is correct. 
- int getMapValue(std::vector>> const& pid_cid_fid, - size_t pid, int cid, int fid) { - if (pid_cid_fid.size() <= pid) - LOG(FATAL) << "Current pid is out of range.\n"; - - auto& cid_fid_map = pid_cid_fid[pid]; // alias - auto cid_it = cid_fid_map.find(cid); - if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; +// Look up a map value and throw an error when not found +template +B mapVal(std::map const& map, A const& a) { + auto it = map.find(a); + if (it == map.end()) + LOG(FATAL) << "Cannot look up expected map value.\n"; - auto& fid_map = cid_it->second; // alias - auto fid_it = fid_map.find(fid); - if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; + return it->second; +} - return fid_it->second; - } +// Get a map value while being const-correct and also checking that the value exists. +template +T getMapValue(std::vector>> const& pid_cid_fid, size_t pid, int cid, + int fid) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; - // Set a feature to be an outlier, and take care to check that - // the bookkeeping is correct. 
- void setMapValue(std::vector>> & pid_cid_fid, - size_t pid, int cid, int fid, int val) { - if (pid_cid_fid.size() <= pid) - LOG(FATAL) << "Current pid is out of range.\n"; + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; - auto& cid_fid_map = pid_cid_fid[pid]; // alias - auto cid_it = cid_fid_map.find(cid); - if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - auto& fid_map = cid_it->second; // alias - auto fid_it = fid_map.find(fid); - if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; + return fid_it->second; +} - fid_it->second = val; - } +// Set a map value while taking care that the place for it exists. +void setMapValue(std::vector>> & pid_cid_fid, + size_t pid, int cid, int fid, int val) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; -} // namespace dense_map + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid it out of range.\n"; -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - std::cout.precision(17); // to be able to print timestamps + fid_it->second = val; +} +void parameterValidation() { if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; if (FLAGS_sparse_map.empty()) @@ -1329,240 +1027,96 @@ int main(int argc, char** argv) { if (FLAGS_registration && (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty())) LOG(FATAL) << "In order to 
register the map, the hugin and xyz file must be specified."; - // We assume our camera rig has n camera types. Each can be image or - // depth + image. Just one camera must be the reference camera. In - // this code it will be nav_cam. Every camera object (class - // cameraImage) knows its type (an index), which can be used to look - // up its intrinsics, image topic, depth topic (if present), - // ref_to_cam_timestamp_offset, and ref_to_cam_transform. A camera - // object also stores its image, depth cloud (if present), its - // timestamp, and indices pointing to its left and right ref - // bracketing cameras. - - // For every instance of a reference camera its - // ref_to_cam_timestamp_offset is 0 and kept fixed, - // ref_to_cam_transform is the identity and kept fixed, and the - // indices pointing to the left and right ref bracketing cameras are - // identical. + if (FLAGS_float_scale && FLAGS_affine_depth_to_image) + LOG(FATAL) << "The options --float_scale and --affine_depth_to_image should not be used " + << "together. 
If the latter is used, the scale is always floated.\n"; +} - // The info below will eventually come from a file - int num_cam_types = 3; - int ref_cam_type = 0; // Below we assume the starting cam is the ref cam +void set_up_block_sizes(int num_depth_params, + std::vector & bracketed_cam_block_sizes, + std::vector & bracketed_depth_block_sizes, + std::vector & bracketed_depth_mesh_block_sizes, + std::vector & mesh_block_sizes) { + // Wipe the outputs + bracketed_cam_block_sizes.clear(); + bracketed_depth_block_sizes.clear(); + bracketed_depth_mesh_block_sizes.clear(); + mesh_block_sizes.clear(); - // Image and depth topics - std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", - "/hw/depth_haz/extended/amplitude_int", - "/hw/cam_sci/compressed"}; - std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; + int num_focal_lengths = 1; // The x and y focal length are assumed to be the same + int num_distortion_params = 1; // will be overwritten later - std::vector cam_params; - std::vector ref_to_cam_trans; - std::vector ref_to_cam_timestamp_offsets; - Eigen::Affine3d nav_cam_to_body_trans; - Eigen::Affine3d haz_cam_depth_to_image_transform; - dense_map::readConfigFile( // Inputs - cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", - // Outputs - cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, - haz_cam_depth_to_image_transform); - std::vector orig_cam_params = cam_params; + // Set up the variable blocks to optimize for BracketedCamError - if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { - for (size_t it = 0; it < cam_names.size(); it++) { - if (cam_names[it] == "sci_cam") { - double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; - std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] - << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; - 
ref_to_cam_timestamp_offsets[it] = new_val; - } - } - } + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_cam_block_sizes.push_back(num_focal_lengths); + bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + bracketed_cam_block_sizes.push_back(num_distortion_params); - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; - std::shared_ptr bvh_tree; - if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + // Set up variable blocks to optimize for BracketedDepthError + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(num_depth_params); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - // If desired to process only specific timestamps - std::set sci_cam_timestamps_to_use; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - double val; - while (ifs >> val) sci_cam_timestamps_to_use.insert(val); - } + // Set up the variable blocks to optimize for BracketedDepthMeshError + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(num_depth_params); + 
bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - double haz_cam_depth_to_image_scale - = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + // Set up the variable blocks to optimize for the mesh xyz + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); +} - // Separate the initial scale. This is convenient if - // haz_cam_depth_to_image is scale * rotation + translation and if - // it is desired to keep the scale fixed. In either case, the scale - // will be multiplied back when needed. - Eigen::Affine3d haz_cam_normalized_depth_to_image = haz_cam_depth_to_image_transform; - haz_cam_normalized_depth_to_image.linear() /= haz_cam_depth_to_image_scale; +typedef std::map> StrToMsgMap; + +// Look up each ref cam image by timestamp. In between any two ref cam timestamps, +// which are no further from each other than the bracket length, look up an image +// of each of the other camera types. If more than one choice, try to stay as close +// as possible to the midpoint of the two bracketing ref cam timestamps. This way +// there's more wiggle room later if one attempts to modify the timestamp offset. +void lookupImagesAndBrackets( // Inputs + int ref_cam_type, double bracket_len, double timestamp_offsets_max_change, + double max_haz_cam_image_to_depth_timestamp_diff, bool float_timestamp_offsets, + std::vector const& cam_names, std::vector const& ref_timestamps, + std::vector const& image_topics, std::vector const& depth_topics, + StrToMsgMap const& bag_map, std::vector> const& cam_timestamps_to_use, + std::vector const& ref_to_cam_timestamp_offsets, + // Outputs + std::vector& cams, std::vector& min_timestamp_offset, + std::vector& max_timestamp_offset) { + std::cout << "Looking up images and timestamp bracketing." 
<< std::endl; + + int num_ref_cams = ref_timestamps.size(); + int num_cam_types = cam_names.size(); + + // Initialize the outputs + cams.clear(); + min_timestamp_offset.resize(num_cam_types); + max_timestamp_offset.resize(num_cam_types); + + // A lot of care is needed with positions. This remembers how we travel in time + // for each camera type so we have fewer messages to search. + // But if a mistake is done below it will mess up this bookkeeping. + std::vector image_start_positions(num_cam_types, 0); + std::vector cloud_start_positions(num_cam_types, 0); - // Read the sparse map - boost::shared_ptr sparse_map = - boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + // Populate the data for each camera image + for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { + if (ref_cam_type != 0) + LOG(FATAL) << "It is assumed that the ref cam type is 0."; - // Deal with using a non-FOV model for nav_cam, if desired - Eigen::VectorXd nav_cam_distortion_replacement; - if (FLAGS_nav_cam_distortion_replacement != "") { - std::vector vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); - if (vec.size() != 4 && vec.size() != 5) - LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding" - << "to radial and tangential distortion coefficients.\n"; - nav_cam_distortion_replacement - = Eigen::Map(vec.data(), vec.size()); - } - - if (FLAGS_refiner_skip_filtering && - (cam_params[ref_cam_type].GetDistortion().size() > 1 || - nav_cam_distortion_replacement.size() != 0)) - LOG(FATAL) << "Must use outlier filtering if a non-fisheye lens distortion is used" - << "with nav_cam, as it is hard to to fit such a model at the image periphery.\n"; - - // TODO(oalexan1): All this timestamp reading logic below should be in a function - - // Parse the ref timestamps from the sparse map - // We assume the sparse map image names are the timestamps. 
- std::vector ref_timestamps; - const std::vector& sparse_map_images = sparse_map->cid_to_filename_; - ref_timestamps.resize(sparse_map_images.size()); - for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); - ref_timestamps[cid] = timestamp; - } - if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - - // Will optimize the nav cam poses as part of the process - std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias - - // Put transforms of the reference cameras in a vector. We will optimize them. - int num_ref_cams = world_to_ref_t.size(); - if (world_to_ref_t.size() != ref_timestamps.size()) - LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; - std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::rigid_transform_to_array(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - - std::vector> cam_timestamps_to_use = {std::set(), - std::set(), - sci_cam_timestamps_to_use}; - - // Which intrinsics from which cameras to float - std::vector> intrinsics_to_float(num_cam_types); - dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); - dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); - dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); - - std::string depth_to_image_name = "depth_to_image"; - std::set extrinsics_to_float; - dense_map::parse_extrinsics_to_float(cam_names, cam_names[ref_cam_type], depth_to_image_name, - FLAGS_extrinsics_to_float, extrinsics_to_float); - - if (FLAGS_float_scale && FLAGS_affine_depth_to_image) - LOG(FATAL) << "The options --float_scale and --affine_depth_to_image should not be used " - << "together. 
If the latter is used, the scale is always floated.\n"; - - if (!FLAGS_affine_depth_to_image && FLAGS_float_scale && - extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " - << "this is allowed as part of --extrinsics_to_float.\n"; - - if (FLAGS_nav_cam_distortion_replacement != "") { - if (intrinsics_to_float[ref_cam_type].find("distortion") - == intrinsics_to_float[ref_cam_type].end() || - intrinsics_to_float[ref_cam_type].size() != 1) { - LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " - << "distortion and no other nav cam intrinsics.\n"; - } - } - - // Put in arrays, so we can optimize them - std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::rigid_transform_to_array - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Set up the variable blocks to optimize for BracketedDepthError - int num_depth_params = dense_map::NUM_RIGID_PARAMS; - if (FLAGS_affine_depth_to_image) num_depth_params = dense_map::NUM_AFFINE_PARAMS; - - // Depth to image transforms and scales - std::vector normalized_depth_to_image; - std::vector depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; - normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); - normalized_depth_to_image.push_back(haz_cam_normalized_depth_to_image); - normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); - - // Put in arrays, so we can optimize them - std::vector normalized_depth_to_image_vec(num_cam_types * num_depth_params); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (FLAGS_affine_depth_to_image) - dense_map::affine_transform_to_array - (normalized_depth_to_image[cam_type], - &normalized_depth_to_image_vec[num_depth_params * cam_type]); - else - 
dense_map::rigid_transform_to_array - (normalized_depth_to_image[cam_type], - &normalized_depth_to_image_vec[num_depth_params * cam_type]); - } - - // Put the intrinsics in arrays - std::vector focal_lengths(num_cam_types); - std::vector optical_centers(num_cam_types); - std::vector distortions(num_cam_types); - for (int it = 0; it < num_cam_types; it++) { - focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths - optical_centers[it] = cam_params[it].GetOpticalOffset(); - - if (cam_params[it].GetDistortion().size() == 0) - LOG(FATAL) << "The cameras are expected to have distortion."; - distortions[it] = cam_params[it].GetDistortion(); - } - - // Build a map for quick access for all the messages we may need - // TODO(oalexan1): Must the view be kept open for this to work? - std::vector topics; - for (auto it = 0; it < image_topics.size(); it++) - if (image_topics[it] != "") topics.push_back(image_topics[it]); - for (auto it = 0; it < depth_topics.size(); it++) - if (depth_topics[it] != "") topics.push_back(depth_topics[it]); - rosbag::Bag bag; - bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); - rosbag::View view(bag, rosbag::TopicQuery(topics)); - std::map> bag_map; - dense_map::indexMessages(view, bag_map); - - // A lot of care is needed here. This remembers how we travel in time - // for each camera type so we have fewer messages to search. - // But if a mistake is done below it will mess up this bookkeeping. - std::vector image_start_positions(num_cam_types, 0); - std::vector cloud_start_positions(num_cam_types, 0); - - // Cannot add a (positive) value more this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. - std::vector upper_bound(num_cam_types, 1.0e+100); - - // Cannot add a (negative) value less than this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. 
- std::vector lower_bound(num_cam_types, -1.0e+100); - - std::cout << "Bracketing the images in time." << std::endl; - - // Populate the data for each camera image - std::vector cams; - for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { - if (ref_cam_type != 0) - LOG(FATAL) << "It is assumed that the ref cam type is 0."; + bool save_grayscale = true; // for matching we will need grayscale for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { dense_map::cameraImage cam; @@ -1574,14 +1128,13 @@ int main(int argc, char** argv) { cam.beg_ref_index = ref_it; cam.end_ref_index = ref_it; - // Search the whole set of timestamps, so set start_pos = - // 0. This is slower but more robust than keeping track of how - // we move in the increasing order of time. - int start_pos = 0; - bool save_grayscale = true; // for matching we will need grayscale + // Start looking up the image timestamp from this position. Some care + // is needed here. + int start_pos = image_start_positions[cam_type]; // care here double found_time = -1.0; // this has to succeed since we picked the ref images in the map - if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, + if (!dense_map::lookupImage(cam.timestamp, mapVal(bag_map, image_topics[cam_type]), + save_grayscale, // outputs cam.image, image_start_positions[cam_type], // care here found_time)) @@ -1594,19 +1147,20 @@ int main(int argc, char** argv) { << "Cannot look up camera at time " << cam.timestamp << ".\n"; success = true; + image_start_positions[cam_type] = start_pos; // save for next time } else { if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket // Convert the bracketing timestamps to current cam's time double ref_to_cam_offset = ref_to_cam_timestamp_offsets[cam_type]; - double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_offset; - double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_offset; + double left_timestamp = 
ref_timestamps[ref_it] + ref_to_cam_offset; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_offset; if (right_timestamp <= left_timestamp) - LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; + LOG(FATAL) << "Ref timestamps must be in strictly increasing order.\n"; - if (right_timestamp - left_timestamp > FLAGS_bracket_len) + if (right_timestamp - left_timestamp > bracket_len) continue; // Must respect the bracket length // Find the image timestamp closest to the midpoint of the brackets. This will give @@ -1619,7 +1173,6 @@ int main(int argc, char** argv) { // so that at the next iteration we are passed what we // search for. int start_pos = image_start_positions[cam_type]; // care here - bool save_grayscale = true; // for matching we will need grayscale double curr_timestamp = left_timestamp; // start here cv::Mat best_image; double best_dist = 1.0e+100; @@ -1628,7 +1181,7 @@ int main(int argc, char** argv) { if (found_time >= right_timestamp) break; // out of range cv::Mat image; - if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], + if (!dense_map::lookupImage(curr_timestamp, mapVal(bag_map, image_topics[cam_type]), save_grayscale, // outputs image, @@ -1663,8 +1216,11 @@ int main(int argc, char** argv) { cam.end_ref_index = ref_it + 1; cam.image = best_image; - upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); - lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); + // So far these are relative offsets, this will be adjusted further down + max_timestamp_offset[cam_type] + = std::min(max_timestamp_offset[cam_type], best_time - left_timestamp); + min_timestamp_offset[cam_type] + = std::max(min_timestamp_offset[cam_type], best_time - right_timestamp); success = true; } @@ -1689,8 +1245,9 @@ int main(int argc, char** argv) { cv::Mat cloud; // Look up the closest cloud in time (either before or after cam.timestamp) // This need not succeed. 
- dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], - FLAGS_max_haz_cam_image_to_depth_timestamp_diff, + dense_map::lookupCloud(cam.timestamp, + mapVal(bag_map, depth_topics[cam_type]), + max_haz_cam_image_to_depth_timestamp_diff, // Outputs cam.depth_cloud, cloud_start_positions[cam_type], // care here @@ -1701,412 +1258,685 @@ int main(int argc, char** argv) { } // end loop over camera types } // end loop over ref images - std::cout << "Allowed timestamp offset range while respecting the brackets for given cameras:\n"; for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { if (cam_type == ref_cam_type) continue; // bounds don't make sense here // So far we had the relative change. Now add the actual offset to get the max allowed offset. - lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + min_timestamp_offset[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + max_timestamp_offset[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + } - std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] - << ", " << upper_bound[cam_type] << "]\n"; + std::cout << "Timestamp offset allowed ranges:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + min_timestamp_offset[cam_type] = std::max(min_timestamp_offset[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + - timestamp_offsets_max_change); + max_timestamp_offset[cam_type] = std::min(max_timestamp_offset[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + + timestamp_offsets_max_change); + + // Tighten a bit to ensure we don't exceed things when we add + // and subtract timestamps later. Note that timestamps are + // measured in seconds and fractions of a second since epoch and + // can be quite large so precision loss can easily happen. 
+ min_timestamp_offset[cam_type] += 1.0e-5; + max_timestamp_offset[cam_type] -= 1.0e-5; + std::cout << std::setprecision(8) << cam_names[cam_type] + << ": [" << min_timestamp_offset[cam_type] + << ", " << max_timestamp_offset[cam_type] << "]\n"; } +} - if (FLAGS_float_timestamp_offsets) { - std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - if (cam_type == ref_cam_type) continue; // bounds don't make sense here +void multiViewTriangulation( // Inputs + std::vector const& cam_params, + std::vector const& cams, std::vector const& world_to_cam, + std::vector> const& pid_to_cid_fid, + std::vector>> const& keypoint_vec, + // Outputs + std::vector>>& pid_cid_fid_inlier, + std::vector& xyz_vec) { + xyz_vec.resize(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + std::vector focal_length_vec; + std::vector world_to_cam_vec; + std::vector pix_vec; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Triangulate inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - lower_bound[cam_type] = std::max(lower_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - - FLAGS_timestamp_offsets_max_change); + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); - upper_bound[cam_type] = std::min(upper_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - + FLAGS_timestamp_offsets_max_change); + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } - // Tighten a bit to ensure we don't exceed things when we add - // and subtract 
timestamps later. Note that timestamps are - // measured in seconds and fractions of a second since epoch and - // can be quite large so precision loss can easily happen. - lower_bound[cam_type] += 1.0e-5; - upper_bound[cam_type] -= 1.0e-5; + if (pix_vec.size() < 2) { + // If after outlier filtering less than two rays are left, can't triangulate. + // Must set all features for this pid to outliers. + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } - std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] - << ", " << upper_bound[cam_type] << "]\n"; + // Nothing else to do + continue; } + + // Triangulate n rays emanating from given undistorted and centered pixels + xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); } - // The images from the bag may need to be resized to be the same - // size as in the calibration file. Sometimes the full-res images - // can be so blurry that interest point matching fails, hence the - // resizing. - for (size_t it = 0; it < cam_params.size(); it++) - dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); + return; +} - // Sort by the timestamp in reference camera time. This is essential - // for matching each image to other images close in time. 
- std::sort(cams.begin(), cams.end(), dense_map::timestampLess); +void meshTriangulations( // Inputs + std::vector const& cam_params, + std::vector const& cams, std::vector const& world_to_cam, + std::vector> const& pid_to_cid_fid, + std::vector>> const& pid_cid_fid_inlier, + std::vector>> const& keypoint_vec, + Eigen::Vector3d const& bad_xyz, double min_ray_dist, double max_ray_dist, + mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + // Outputs + std::vector>>& pid_cid_fid_mesh_xyz, + std::vector& pid_mesh_xyz) { + // Initialize the outputs + pid_cid_fid_mesh_xyz.resize(pid_to_cid_fid.size()); + pid_mesh_xyz.resize(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d avg_mesh_xyz(0, 0, 0); + int num_intersections = 0; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Initialize this + pid_cid_fid_mesh_xyz[pid][cid][fid] = bad_xyz; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - if (FLAGS_verbose) { - int count = 10000; - std::vector image_files; - for (size_t it = 0; it < cams.size(); it++) { - std::ostringstream oss; - oss << count << "_" << cams[it].camera_type << ".jpg"; - std::string name = oss.str(); - std::cout << "Writing: " << name << std::endl; - cv::imwrite(name, cams[it].image); - count++; - image_files.push_back(name); + // Intersect the ray with the mesh + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector3d mesh_xyz(0.0, 0.0, 0.0); + bool have_mesh_intersection + = dense_map::ray_mesh_intersect(dist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + min_ray_dist, max_ray_dist, + // Output + mesh_xyz); + + if (have_mesh_intersection) { + pid_cid_fid_mesh_xyz[pid][cid][fid] = mesh_xyz; + avg_mesh_xyz += mesh_xyz; + 
num_intersections += 1; + } } + + // Average the intersections of all rays with the mesh + if (num_intersections >= 1) + avg_mesh_xyz /= num_intersections; + else + avg_mesh_xyz = bad_xyz; + + pid_mesh_xyz[pid] = avg_mesh_xyz; } - // Detect features using multiple threads. Too many threads may result - // in high memory usage. - int num_threads_val = std::min(8u, std::thread::hardware_concurrency()); - std::ostringstream oss; - oss << num_threads_val; - std::string num_threads = oss.str(); - google::SetCommandLineOption("num_threads", num_threads.c_str()); - if (!gflags::GetCommandLineOption("num_threads", &num_threads)) - LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; - std::cout << "Using " << num_threads << " threads for feature detection/matching." << std::endl; - std::cout << "Note that this step uses a huge amount of memory." << std::endl; - - std::cout << "Detecting features." << std::endl; - - std::vector cid_to_descriptor_map; - std::vector cid_to_keypoint_map; - cid_to_descriptor_map.resize(cams.size()); - cid_to_keypoint_map.resize(cams.size()); - { - // Make the threadpool go out of scope when not needed to not use up memory - ff_common::ThreadPool thread_pool; - for (size_t it = 0; it < cams.size(); it++) { - thread_pool.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, - &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + return; +} + +void flagOutlierByExclusionDist( // Inputs + int ref_cam_type, int nav_cam_num_exclude_boundary_pixels, + std::vector const& cam_params, + std::vector const& cams, + std::vector> const& pid_to_cid_fid, + std::vector>> const& keypoint_vec, + // Outputs + std::vector>>& pid_cid_fid_inlier) { + // Initialize the output + pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); + + // Iterate though interest point matches + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + 
cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Initially there are inliers only + pid_cid_fid_inlier[pid][cid][fid] = 1; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cams[cid].camera_type == ref_cam_type) { + // Flag as outliers pixels at the nav_cam boundary, if desired. This + // is especially important when the nav_cam uses the radtan + // model instead of fisheye. + Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); + int excl = nav_cam_num_exclude_boundary_pixels; + if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || + dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } } - thread_pool.Join(); } + return; +} + +// Flag outliers by triangulation angle and reprojection error. It is +// assumed that the cameras in world_to_cam are up-to-date given the +// current state of optimization, and that the residuals (including +// the reprojection errors) have also been updated beforehand. +void flagOutliersByTriAngleAndReprojErr( // Inputs + double refiner_min_angle, double max_reprojection_error, + std::vector> const& pid_to_cid_fid, + std::vector>> const& keypoint_vec, + std::vector const& world_to_cam, std::vector const& xyz_vec, + std::vector>> const& pid_cid_fid_to_residual_index, + std::vector const& residuals, + // Outputs + std::vector>>& pid_cid_fid_inlier) { + // Must deal with outliers by triangulation angle before + // removing outliers by reprojection error, as the latter will + // exclude some rays which form the given triangulated points. 
+ int num_outliers_by_angle = 0, num_total_features = 0; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // Find the largest angle among any two intersecting rays + double max_rays_angle = 0.0; + + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; + + num_total_features++; + + Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; + ray1.normalize(); + + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; + + // Look at each cid and next cids + if (cid2 <= cid1) + continue; - dense_map::MATCH_MAP matches; + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) continue; - std::vector > image_pairs; - for (size_t it1 = 0; it1 < cams.size(); it1++) { - for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) { - image_pairs.push_back(std::make_pair(it1, it2)); + Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; + ray2.normalize(); + + double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); + + if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; + + max_rays_angle = std::max(max_rays_angle, curr_angle); + } } - } - { - // The transform from the world to every camera - std::vector world_to_cam; - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); + if (max_rays_angle >= refiner_min_angle) + continue; // This is a good triangulated point, with large angle of 
convergence - std::cout << "Matching features." << std::endl; - ff_common::ThreadPool thread_pool; - std::mutex match_mutex; - for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { - auto pair = image_pairs[pair_it]; - int left_image_it = pair.first, right_image_it = pair.second; - if (FLAGS_refiner_skip_filtering) { - thread_pool.AddTask(&dense_map::matchFeatures, - &match_mutex, - left_image_it, right_image_it, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], - cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); - } else { - thread_pool.AddTask(&dense_map::matchFeaturesWithCams, - &match_mutex, - left_image_it, right_image_it, - cam_params[cams[left_image_it].camera_type], - cam_params[cams[right_image_it].camera_type], - world_to_cam[left_image_it], - world_to_cam[right_image_it], - FLAGS_initial_max_reprojection_error, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], - cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); + // Flag as outliers all the features for this cid + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + num_outliers_by_angle++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } + } + std::cout << std::setprecision(4) << "Removed " << num_outliers_by_angle + << " outlier features with small angle of convergence, out of " + << num_total_features << " (" + << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; + + int num_outliers_reproj = 0; + num_total_features = 0; // reusing this variable + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid 
!= pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; + + num_total_features++; + + // Find the pixel residuals + size_t residual_index = dense_map::getMapValue(pid_cid_fid_to_residual_index, pid, cid, fid); + if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; + + double res_x = residuals[residual_index + 0]; + double res_y = residuals[residual_index + 1]; + // NaN values will never be inliers if the comparison is set as below + bool is_good = (Eigen::Vector2d(res_x, res_y).norm() <= max_reprojection_error); + if (!is_good) { + num_outliers_reproj++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); } } - thread_pool.Join(); } - cid_to_descriptor_map = std::vector(); // Wipe, takes memory - // If feature A in image I matches feather B in image J, which matches feature C in image K, - // then (A, B, C) belong together into a track. Build such a track. 
+ std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj + << " outlier features using reprojection error, out of " << num_total_features + << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; + + return; +} + +// Evaluate the residuals before and after optimization +void evalResiduals( // Inputs + std::string const& tag, std::vector const& residual_names, + std::vector const& residual_scales, + // Outputs + ceres::Problem& problem, std::vector& residuals) { + double total_cost = 0.0; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + + // Sanity checks, after the residuals are created + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + + // Compensate for the scale + for (size_t it = 0; it < residuals.size(); it++) + residuals[it] /= residual_scales[it]; + + dense_map::calc_median_residuals(residuals, residual_names, tag); + return; +} + +// Given all the merged and filtered tracks in pid_cid_fid, for each +// image pair cid1 and cid2 with cid1 < cid2 < cid1 + num_overlaps + 1, +// save the matches of this pair which occur in the set of tracks. 
+void saveInlierMatches( // Inputs + std::vector const& image_files, int num_overlaps, + std::vector> const& pid_to_cid_fid, + std::vector>> const& keypoint_vec, + std::vector>> const& pid_cid_fid_inlier) { + MATCH_MAP matches; + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; + + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; + + bool is_good = (cid1 < cid2 && cid2 < cid1 + num_overlaps + 1); + if (!is_good) + continue; + + // Consider inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1) || + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) + continue; + + auto index_pair = std::make_pair(cid1, cid2); + + InterestPoint ip1(keypoint_vec[cid1][fid1].first, keypoint_vec[cid1][fid1].second); + InterestPoint ip2(keypoint_vec[cid2][fid2].first, keypoint_vec[cid2][fid2].second); - std::vector, int>> keypoint_map(cams.size()); + matches[index_pair].first.push_back(ip1); + matches[index_pair].second.push_back(ip2); + } + } + } // End iterations over pid - // Give all interest points in a given image a unique id for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias + auto & index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; int left_index = index_pair.first; int right_index = index_pair.second; - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = 
std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - keypoint_map[left_index][dist_left_ip] = 0; - keypoint_map[right_index][dist_right_ip] = 0; - } + auto & left_image = image_files[left_index]; // alias + auto& right_image = image_files[right_index]; // alias + + std::string left_stem = boost::filesystem::path(left_image).stem().string(); + std::string right_stem = boost::filesystem::path(right_image).stem().string(); + + std::string match_file = left_stem + "__" + right_stem + "-inliers.match"; + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); } +} + +} // namespace dense_map + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + + dense_map::parameterValidation(); + + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + int haz_cam_type = 1; + + // Image and depth topics + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; + + // Read the calibration so far + std::vector cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + std::vector orig_cam_params = cam_params; - // Give all interest points in a given image a unique id - // And put them in a vector with the id corresponding to 
the interest point - std::vector>> keypoint_vec(cams.size()); - for (size_t cam_it = 0; cam_it < cams.size(); cam_it++) { - int count = 0; - for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { - ip_it->second = count; - count++; - keypoint_vec[cam_it].push_back(ip_it->first); + // Optionally override the timestamp offset + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); it++) { + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; + } } } - openMVG::matching::PairWiseMatches match_map; - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias + // Optionally load the mesh + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - int left_index = index_pair.first; - int right_index = index_pair.second; + // If desired to process only specific timestamps + std::set sci_cam_timestamps_to_use; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps_to_use.insert(val); + } + + // Separate the initial scale. This is convenient if + // haz_cam_depth_to_image is scale * rotation + translation and if + // it is desired to keep the scale fixed. In either case, the scale + // will be multiplied back when needed. 
+ double haz_cam_depth_to_image_scale + = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + Eigen::Affine3d haz_cam_normalized_depth_to_image = haz_cam_depth_to_image_transform; + haz_cam_normalized_depth_to_image.linear() /= haz_cam_depth_to_image_scale; + + // Read the sparse map. It has the ref cam poses. + boost::shared_ptr sparse_map = + boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + + // Optionally deal with using a non-FOV model for nav_cam + Eigen::VectorXd nav_cam_distortion_replacement; + if (FLAGS_nav_cam_distortion_replacement != "") { + std::vector vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); + if (vec.size() != 4 && vec.size() != 5) + LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding" + << "to radial and tangential distortion coefficients.\n"; + nav_cam_distortion_replacement + = Eigen::Map(vec.data(), vec.size()); + } + + // Parse the ref timestamps from the sparse map. We assume the + // sparse map image names are the timestamps. + std::vector ref_timestamps; + const std::vector& sparse_map_images = sparse_map->cid_to_filename_; + ref_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) + ref_timestamps[cid] = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; + + // Will optimize the nav cam poses as part of the process + std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias + + // Put transforms of the reference cameras in a vector. We will optimize them. 
+ int num_ref_cams = world_to_ref_t.size(); + if (world_to_ref_t.size() != ref_timestamps.size()) + LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; + std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::rigid_transform_to_array(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Need the identity transform for when the cam is the ref cam, and + // have to have a placeholder for the right bracketing cam which won't be used. + Eigen::Affine3d identity = Eigen::Affine3d::Identity(); + std::vector identity_vec(dense_map::NUM_RIGID_PARAMS); + dense_map::rigid_transform_to_array(identity, &identity_vec[0]); + + // Put all timestamps to use in a vector, in the nav_cam, haz_cam, sci_cam order + std::vector> cam_timestamps_to_use = {std::set(), + std::set(), + sci_cam_timestamps_to_use}; - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; + // Which intrinsics from which cameras to float + std::vector> intrinsics_to_float(num_cam_types); + dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); + dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); - std::vector mvg_matches; + std::string depth_to_image_name = "depth_to_image"; + std::set extrinsics_to_float; + dense_map::parse_extrinsics_to_float(cam_names, cam_names[ref_cam_type], depth_to_image_name, + FLAGS_extrinsics_to_float, extrinsics_to_float); - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + if 
(!FLAGS_affine_depth_to_image && FLAGS_float_scale && + extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " + << "this is allowed as part of --extrinsics_to_float.\n"; - int left_id = keypoint_map[left_index][dist_left_ip]; - int right_id = keypoint_map[right_index][dist_right_ip]; - mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); + if (FLAGS_nav_cam_distortion_replacement != "") { + if (intrinsics_to_float[ref_cam_type].find("distortion") + == intrinsics_to_float[ref_cam_type].end() || + intrinsics_to_float[ref_cam_type].size() != 1) { + LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " + << "distortion and no other nav cam intrinsics.\n"; } - match_map[index_pair] = mvg_matches; } - if (FLAGS_verbose) { - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair index_pair = it->first; - dense_map::MATCH_PAIR const& match_pair = it->second; - - int left_index = index_pair.first; - int right_index = index_pair.second; - - std::ostringstream oss1; - oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; + // Put the extrinsics in arrays, so we can optimize them + std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - std::string left_stem = oss1.str(); - std::string left_image = left_stem + ".jpg"; + // Set up the variable blocks to optimize for BracketedDepthError + int num_depth_params = dense_map::NUM_RIGID_PARAMS; + if (FLAGS_affine_depth_to_image) num_depth_params = dense_map::NUM_AFFINE_PARAMS; - std::ostringstream oss2; - oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; + // Depth to image transforms and scales + std::vector 
normalized_depth_to_image; + std::vector depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); + normalized_depth_to_image.push_back(haz_cam_normalized_depth_to_image); + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); - std::string right_stem = oss2.str(); - std::string right_image = right_stem + ".jpg"; + // Put depth_to_image arrays, so we can optimize them + std::vector normalized_depth_to_image_vec(num_cam_types * num_depth_params); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::affine_transform_to_array + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + else + dense_map::rigid_transform_to_array + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + } - std::string match_file = left_stem + "__" + right_stem + ".match"; + // Put the intrinsics in arrays + std::vector focal_lengths(num_cam_types); + std::vector optical_centers(num_cam_types); + std::vector distortions(num_cam_types); + for (int it = 0; it < num_cam_types; it++) { + focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths + optical_centers[it] = cam_params[it].GetOpticalOffset(); - std::cout << "Writing: " << left_image << ' ' << right_image << ' ' - << match_file << std::endl; - dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); - } + if (cam_params[it].GetDistortion().size() == 0) + LOG(FATAL) << "The cameras are expected to have distortion."; + distortions[it] = cam_params[it].GetDistortion(); } - // TODO(oalexan1): De-allocate all data when no longer need as - // the matching logic takes a lot of time. 
- - // not needed anymore and take up a lot of RAM - matches.clear(); matches = dense_map::MATCH_MAP(); - keypoint_map.clear(); keypoint_map.shrink_to_fit(); - cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); - - std::vector > pid_to_cid_fid; - { - // Build tracks - // De-allocate these as soon as not needed to save memory - openMVG::tracks::TracksBuilder trackBuilder; - trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences - trackBuilder.Filter(); // Filter: Remove tracks that have conflict - // trackBuilder.ExportToStream(std::cout); - // Export tracks as a map (each entry is a sequence of imageId and featureIndex): - // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} - openMVG::tracks::STLMAPTracks map_tracks; - trackBuilder.ExportToSTL(map_tracks); - match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed - trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it - - if (map_tracks.empty()) - LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; - - size_t num_elems = map_tracks.size(); - // Populate the filtered tracks - pid_to_cid_fid.clear(); - pid_to_cid_fid.resize(num_elems); - size_t curr_id = 0; - for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { - for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { - pid_to_cid_fid[curr_id][itr2->first] = itr2->second; - } - curr_id++; - } - } + // Build a map for quick access for all the messages we may need + // TODO(oalexan1): Must the view be kept open for this to work? 
+ std::vector topics; + for (auto it = 0; it < image_topics.size(); it++) + if (image_topics[it] != "") topics.push_back(image_topics[it]); + for (auto it = 0; it < depth_topics.size(); it++) + if (depth_topics[it] != "") topics.push_back(depth_topics[it]); + rosbag::Bag bag; + bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery(topics)); + dense_map::StrToMsgMap bag_map; + dense_map::indexMessages(view, bag_map); - // Set up the variable blocks to optimize for BracketedCamError - std::vector bracketed_cam_block_sizes; - int num_focal_lengths = 1; - int num_distortion_params = 1; // will be overwritten - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_cam_block_sizes.push_back(num_focal_lengths); - bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - bracketed_cam_block_sizes.push_back(num_distortion_params); // will be modified later + // Keep here the images, timestamps, and bracketing information + std::vector cams; + // The range of ref_to_cam_timestamp_offsets[cam_type] before + // getting out of the bracket. 
+ std::vector min_timestamp_offset, max_timestamp_offset; + + // Select the images to use from the bag + dense_map::lookupImagesAndBrackets( // Inputs + ref_cam_type, FLAGS_bracket_len, FLAGS_timestamp_offsets_max_change, + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, FLAGS_float_timestamp_offsets, cam_names, + ref_timestamps, image_topics, depth_topics, bag_map, cam_timestamps_to_use, + ref_to_cam_timestamp_offsets, + // Outputs + cams, min_timestamp_offset, max_timestamp_offset); - // Set up the variable blocks to optimize for RefCamError - std::vector ref_cam_block_sizes; - ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - ref_cam_block_sizes.push_back(num_focal_lengths); - ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - ref_cam_block_sizes.push_back(num_distortion_params); // will be modified later + // The images from the bag may need to be resized to be the same + // size as in the calibration file. Sometimes the full-res images + // can be so blurry that interest point matching fails, hence the + // resizing. + for (size_t it = 0; it < cams.size(); it++) + dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); - // Set up variable blocks to optimize for BracketedDepthError - std::vector bracketed_depth_block_sizes; - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(num_depth_params); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + // Sort by the timestamp in reference camera time. This is essential + // for matching each image to other images close in time. 
+ std::sort(cams.begin(), cams.end(), dense_map::timestampLess); - // Set up the variable blocks to optimize for BracketedDepthMeshError + // The transform from the world to every camera + std::vector world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, + ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + // Detect and match features + std::vector>> keypoint_vec; + std::vector> pid_to_cid_fid; + std::vector image_files; // will be filled only in verbose mode + dense_map::detectMatchFeatures( // Inputs + cams, cam_names, cam_params, world_to_cam, + FLAGS_num_overlaps, FLAGS_initial_max_reprojection_error, + FLAGS_num_match_threads, FLAGS_verbose, + // Outputs + keypoint_vec, pid_to_cid_fid, image_files); + + // Set up the block sizes + std::vector bracketed_cam_block_sizes; + std::vector bracketed_depth_block_sizes; std::vector bracketed_depth_mesh_block_sizes; - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(num_depth_params); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for RefDepthError - std::vector ref_depth_block_sizes; - ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_depth_block_sizes.push_back(num_depth_params); - ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // Set up the variable blocks to optimize for the mesh xyz std::vector mesh_block_sizes; - mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + dense_map::set_up_block_sizes(num_depth_params, + bracketed_cam_block_sizes, bracketed_depth_block_sizes, + 
bracketed_depth_mesh_block_sizes, mesh_block_sizes); // For a given fid = pid_to_cid_fid[pid][cid], the value // pid_cid_fid_inlier[pid][cid][fid] will be non-zero only if this // pixel is an inlier. Originally all pixels are inliers. Once an // inlier becomes an outlier, it never becomes an inlier again. std::vector>> pid_cid_fid_inlier; - if (!FLAGS_refiner_skip_filtering) { - pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - pid_cid_fid_inlier[pid][cid][fid] = 1; - } - } - } + dense_map::flagOutlierByExclusionDist( // Inputs + ref_cam_type, FLAGS_nav_cam_num_exclude_boundary_pixels, cam_params, cams, pid_to_cid_fid, + keypoint_vec, + // Outputs + pid_cid_fid_inlier); + + // Structures needed to intersect rays with the mesh + std::vector>> pid_cid_fid_mesh_xyz; + std::vector pid_mesh_xyz; + Eigen::Vector3d bad_xyz(1.0e+100, 1.0e+100, 1.0e+100); // use this to flag invalid xyz for (int pass = 0; pass < FLAGS_refiner_num_passes; pass++) { std::cout << "\nOptimization pass " << pass + 1 << " / " << FLAGS_refiner_num_passes << "\n"; // The transforms from the world to all cameras must be updated // given the current state of optimization - std::vector world_to_cam; calc_world_to_cam_transforms( // Inputs cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, // Output world_to_cam); - // Do multiview triangulation - std::vector xyz_vec(pid_to_cid_fid.size()); - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - std::vector focal_length_vec; - std::vector world_to_cam_vec; - std::vector pix_vec; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Triangulate inliers only - if 
(!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) - continue; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - - if (cams[cid].camera_type == ref_cam_type) { - // Flag as outliers pixels at the nav_cam boundary, if desired. This - // is especially important when the nav_cam uses the radtan - // model instead of fisheye. - Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); - int excl = FLAGS_nav_cam_num_exclude_boundary_pixels; - if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || - dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - continue; - } - } - - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); - world_to_cam_vec.push_back(world_to_cam[cid]); - pix_vec.push_back(undist_ip); - } - - if (!FLAGS_refiner_skip_filtering && pix_vec.size() < 2) { - // If after outlier filtering less than two rays are left, can't triangulate. - // Must set all features for this pid to outliers. 
- for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - - // Nothing else to do - continue; - } - - // Triangulate n rays emanating from given undistorted and centered pixels - xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); - } + std::vector xyz_vec; + dense_map::multiViewTriangulation( // Inputs + cam_params, cams, world_to_cam, pid_to_cid_fid, + keypoint_vec, + // Outputs + pid_cid_fid_inlier, xyz_vec); + + // Compute where each ray intersects the mesh + if (FLAGS_mesh != "") + dense_map::meshTriangulations( // Inputs + cam_params, cams, world_to_cam, pid_to_cid_fid, + pid_cid_fid_inlier, keypoint_vec, bad_xyz, FLAGS_min_ray_dist, FLAGS_max_ray_dist, mesh, + bvh_tree, + // Outputs + pid_cid_fid_mesh_xyz, pid_mesh_xyz); if (pass == 0 && nav_cam_distortion_replacement.size() > 1) { // At the first pass, right after triangulation is done with a @@ -2120,11 +1950,11 @@ int main(int argc, char** argv) { } // For a given fid = pid_to_cid_fid[pid][cid], - // the value pid_cid_fid_to_residual[pid][cid][fid] will be the index in the array + // the value pid_cid_fid_to_residual_index[pid][cid][fid] will be the index in the array // of residuals (look only at pixel residuals). This structure is populated only for // inliers, so its size changes at each pass. 
- std::vector>> pid_cid_fid_to_residual; - if (!FLAGS_refiner_skip_filtering) pid_cid_fid_to_residual.resize(pid_to_cid_fid.size()); + std::vector>> pid_cid_fid_to_residual_index; + pid_cid_fid_to_residual_index.resize(pid_to_cid_fid.size()); // Form the problem ceres::Problem problem; @@ -2137,137 +1967,92 @@ int main(int argc, char** argv) { int fid = cid_fid->second; // Deal with inliers only - if (!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; int cam_type = cams[cid].camera_type; int beg_ref_index = cams[cid].beg_ref_index; int end_ref_index = cams[cid].end_ref_index; - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - - if (cam_type == ref_cam_type) { - // The cost function of projecting in the ref cam. - ceres::CostFunction* ref_cost_function = - dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* ref_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // Remember the index of the residuals about to create - if (!FLAGS_refiner_skip_filtering) - pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); - - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (ref_cost_function, ref_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &xyz_vec[pid][0], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - } - - Eigen::Vector3d depth_xyz(0, 0, 0); - if (FLAGS_depth_tri_weight == 0 || - !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, 
depth_xyz)) - continue; // could not look up the depth value - - // The constraint that the depth points agree with triangulated points - ceres::CostFunction* ref_depth_cost_function - = dense_map::RefDepthError::Create(FLAGS_depth_tri_weight, - depth_xyz, - ref_depth_block_sizes); + // Left bracketing ref cam for a given cam. For a ref cam, this is itself. + double* left_ref_cam_ptr = &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]; - ceres::LossFunction* ref_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + // Transform from reference camera to given camera + double* ref_to_cam_ptr = &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]; - residual_names.push_back("depth_tri_x_m"); - residual_names.push_back("depth_tri_y_m"); - residual_names.push_back("depth_tri_z_m"); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - residual_scales.push_back(FLAGS_depth_tri_weight); - problem.AddResidualBlock - (ref_depth_cost_function, - ref_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &normalized_depth_to_image_vec[num_depth_params * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0]); + // When the cam is the ref type, the right bracketing camera is nominal + double* right_ref_cam_ptr = NULL; + if (cam_type == ref_cam_type) + right_ref_cam_ptr = &identity_vec[0]; + else + right_ref_cam_ptr = &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]; - if (!FLAGS_float_sparse_map) - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - problem.SetParameterBlockConstant - (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + 
Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to create + pid_cid_fid_to_residual_index[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + left_ref_cam_ptr, right_ref_cam_ptr, ref_to_cam_ptr, &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], &optical_centers[cam_type][0], &distortions[cam_type][0]); + + // See which intrinsics to float + if (intrinsics_to_float[cam_type].find("focal_length") == + intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + if (intrinsics_to_float[cam_type].find("optical_center") == + intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + + // When the camera is the ref type, the right bracketing + // camera is just a placeholder which is not used, hence + // should not be optimized. Same for the ref_to_cam_vec and + // ref_to_cam_timestamp_offsets, etc., as can seen further + // down. 
+ + if (!FLAGS_float_sparse_map) problem.SetParameterBlockConstant(left_ref_cam_ptr); + + if (!FLAGS_float_sparse_map || cam_type == ref_cam_type) + problem.SetParameterBlockConstant(right_ref_cam_ptr); + + if (!FLAGS_float_timestamp_offsets || cam_type == ref_cam_type) { + // Either we don't float timestamp offsets at all, or the cam is the ref type, + // when it can't float anyway. + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); } else { - // Other cameras, which need bracketing - ceres::CostFunction* bracketed_cost_function = - dense_map::BracketedCamError::Create(dist_ip, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* bracketed_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // Remember the index of the residuals about to create - if (!FLAGS_refiner_skip_filtering) - pid_cid_fid_to_residual[pid][cid][fid] = residual_names.size(); - - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (bracketed_cost_function, bracketed_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_timestamp_offsets) { - 
problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + min_timestamp_offset[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + max_timestamp_offset[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end() || + cam_type == ref_cam_type) + problem.SetParameterBlockConstant(ref_to_cam_ptr); - Eigen::Vector3d depth_xyz(0, 0, 0); - if (FLAGS_depth_tri_weight == 0 || - !dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value + Eigen::Vector3d depth_xyz(0, 0, 0); + bool have_depth_tri_constraint + = (FLAGS_depth_tri_weight > 0 && + dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)); + if (have_depth_tri_constraint) { // Ensure that the depth points agree with triangulated points ceres::CostFunction* bracketed_depth_cost_function = dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, @@ -2287,220 +2072,107 @@ int main(int argc, char** argv) { residual_scales.push_back(FLAGS_depth_tri_weight); residual_scales.push_back(FLAGS_depth_tri_weight); problem.AddResidualBlock - (bracketed_depth_cost_function, - bracketed_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + (bracketed_depth_cost_function, bracketed_depth_loss_function, + left_ref_cam_ptr, 
right_ref_cam_ptr, ref_to_cam_ptr, &normalized_depth_to_image_vec[num_depth_params * cam_type], &depth_to_image_scales[cam_type], &xyz_vec[pid][0], &ref_to_cam_timestamp_offsets[cam_type]); - // TODO(oalexan1): This code repeats too much. Need to keep a hash - // of sparse map pointers. - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } + // Note that above we already considered fixing some params. + // We won't repeat that code here. + // If we model an affine depth to image, fix its scale here, + // it will change anyway as part of normalized_depth_to_image_vec. if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) problem.SetParameterBlockConstant (&normalized_depth_to_image_vec[num_depth_params * cam_type]); } - } - } - - if (FLAGS_mesh != "") { - // Add the mesh constraint - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // Add the depth to mesh constraint + bool have_depth_mesh_constraint = false; + depth_xyz = Eigen::Vector3d(0, 0, 0); Eigen::Vector3d mesh_xyz(0, 0, 0); - int num_intersections = 0; // Number of mesh intersections given the rays for this pid - - for (auto cid_fid = 
pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!FLAGS_refiner_skip_filtering && - !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) - continue; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - // Intersect the ray with the mesh - bool have_mesh_intersection = false; - Eigen::Vector3d mesh_intersect(0.0, 0.0, 0.0); - have_mesh_intersection - = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], - world_to_cam[cid], mesh, bvh_tree, - FLAGS_min_ray_dist, FLAGS_max_ray_dist, - // Output - mesh_intersect); - - if (have_mesh_intersection) { - mesh_xyz += mesh_intersect; - num_intersections += 1; - } - - if (FLAGS_depth_mesh_weight > 0 && have_mesh_intersection) { - // Try to make each mesh intersection agree with corresponding depth measurement, - // if it exists - Eigen::Vector3d depth_xyz(0, 0, 0); - if (dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) { - int cam_type = cams[cid].camera_type; - int beg_ref_index = cams[cid].beg_ref_index; - int end_ref_index = cams[cid].end_ref_index; - if (cam_type != ref_cam_type) { - // TODO(oalexan1): What to do when cam_type == ref_cam_type? 
- // Ensure that the depth points agree with mesh points - ceres::CostFunction* bracketed_depth_mesh_cost_function - = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, - depth_xyz, - mesh_intersect, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_depth_mesh_block_sizes); - - ceres::LossFunction* bracketed_depth_mesh_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back("depth_mesh_x_m"); - residual_names.push_back("depth_mesh_y_m"); - residual_names.push_back("depth_mesh_z_m"); - residual_scales.push_back(FLAGS_depth_mesh_weight); - residual_scales.push_back(FLAGS_depth_mesh_weight); - residual_scales.push_back(FLAGS_depth_mesh_weight); - problem.AddResidualBlock - (bracketed_depth_mesh_cost_function, - bracketed_depth_mesh_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &normalized_depth_to_image_vec[num_depth_params * cam_type], - &depth_to_image_scales[cam_type], - &ref_to_cam_timestamp_offsets[cam_type]); - - // TODO(oalexan1): This code repeats too much. Need to keep a hash - // of sparse map pointers. 
- if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end()) { - problem.SetParameterBlockConstant - (&ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - } - if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) - problem.SetParameterBlockConstant - (&normalized_depth_to_image_vec[num_depth_params * cam_type]); - } - } - } + if (FLAGS_mesh != "") { + mesh_xyz = dense_map::getMapValue(pid_cid_fid_mesh_xyz, pid, cid, fid); + have_depth_mesh_constraint + = (FLAGS_depth_mesh_weight > 0 && mesh_xyz != bad_xyz && + dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)); } - if (num_intersections >= 1 && FLAGS_mesh_tri_weight > 0) { - // Average the intersections of all rays with the mesh, and try to make - // the triangulated point agree with the mesh intersection + if (have_depth_mesh_constraint) { + // Try to make each mesh intersection agree with corresponding depth measurement, + // if it exists + ceres::CostFunction* bracketed_depth_mesh_cost_function + = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, + depth_xyz, + mesh_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_mesh_block_sizes); + + ceres::LossFunction* 
bracketed_depth_mesh_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - mesh_xyz /= num_intersections; + residual_names.push_back("depth_mesh_x_m"); + residual_names.push_back("depth_mesh_y_m"); + residual_names.push_back("depth_mesh_z_m"); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + problem.AddResidualBlock + (bracketed_depth_mesh_cost_function, bracketed_depth_mesh_loss_function, + left_ref_cam_ptr, right_ref_cam_ptr, ref_to_cam_ptr, + &normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &ref_to_cam_timestamp_offsets[cam_type]); - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_tri_weight); + // Note that above we already fixed some of these variables. + // Repeat the fixing of depth variables, however, as the previous block + // may not take place. 
+ if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + } + } // end iterating over all cid for given pid + + // This constraint will be for the pid + bool have_mesh_tri_constraint = false; + Eigen::Vector3d avg_mesh_xyz(0, 0, 0); + if (FLAGS_mesh != "") { + avg_mesh_xyz = pid_mesh_xyz.at(pid); + if (FLAGS_mesh_tri_weight > 0 && avg_mesh_xyz != bad_xyz) have_mesh_tri_constraint = true; + } + if (have_mesh_tri_constraint) { + // Try to make the triangulated point agree with the mesh intersection - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &xyz_vec[pid][0]); + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(avg_mesh_xyz, mesh_block_sizes, FLAGS_mesh_tri_weight); - residual_names.push_back("mesh_tri_x_m"); - residual_names.push_back("mesh_tri_y_m"); - residual_names.push_back("mesh_tri_z_m"); + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - residual_scales.push_back(FLAGS_mesh_tri_weight); - residual_scales.push_back(FLAGS_mesh_tri_weight); - residual_scales.push_back(FLAGS_mesh_tri_weight); - } - } - } + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); - // See which intrinsics from which cam to float or keep fixed - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (intrinsics_to_float[cam_type].find("focal_length") == - intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating focal_length." 
<< std::endl; - problem.SetParameterBlockConstant(&focal_lengths[cam_type]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; - } - if (intrinsics_to_float[cam_type].find("optical_center") == - intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << - // std::endl; - problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; + residual_names.push_back("mesh_tri_x_m"); + residual_names.push_back("mesh_tri_y_m"); + residual_names.push_back("mesh_tri_z_m"); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); } - if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { - // std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&distortions[cam_type][0]); - } else { - // std::cout << "For " << cam_names[cam_type] << " floating distortion." 
<< std::endl; - } - } + } // end iterating over pid // Evaluate the residuals before optimization - double total_cost = 0.0; std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "Initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } + dense_map::evalResiduals("before opt", residual_names, residual_scales, problem, residuals); // Solve the problem ceres::Solver::Options options; @@ -2514,8 +2186,8 @@ int main(int argc, char** argv) { options.parameter_tolerance = FLAGS_parameter_tolerance; ceres::Solve(options, &problem, &summary); - // The optimization is done. Right away copy the optimized states to where they belong to - // keep all data in sync. + // The optimization is done. Right away copy the optimized states + // to where they belong to keep all data in sync. 
// Copy back the reference transforms for (int cid = 0; cid < num_ref_cams; cid++) @@ -2556,136 +2228,31 @@ int main(int argc, char** argv) { } // Evaluate the residuals after optimization - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "Final res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Flag outliers - if (!FLAGS_refiner_skip_filtering) { - // Before computing outliers by triangulation angle must recompute all the cameras, - // given the optimized parameters - calc_world_to_cam_transforms( // Inputs - cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, - // Output - world_to_cam); - - // Must deal with outliers by triangulation angle before - // removing outliers by reprojection error, as the latter will - // exclude some rays which form the given triangulated points. 
- int num_outliers_by_angle = 0, num_total_features = 0; - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - // Find the largest angle among any two intersecting rays - double max_rays_angle = 0.0; - - for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); - cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { - int cid1 = cid_fid1->first; - int fid1 = cid_fid1->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; - - num_total_features++; - - Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); - Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; - ray1.normalize(); - - for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); - cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { - int cid2 = cid_fid2->first; - int fid2 = cid_fid2->second; - - // Look at each cid and next cids - if (cid2 <= cid1) - continue; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) continue; - - Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); - Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; - ray2.normalize(); - - double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); - - if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; - - max_rays_angle = std::max(max_rays_angle, curr_angle); - } - } - - if (max_rays_angle >= FLAGS_refiner_min_angle) - continue; // This is a good triangulated point, with large angle of convergence - - // Flag as outliers all the features for this cid - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - - num_outliers_by_angle++; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - } - std::cout << std::setprecision(4) << "Removed " << 
num_outliers_by_angle - << " outlier features with small angle of convergence, out of " - << num_total_features << " (" - << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; - - int num_outliers_reproj = 0; - num_total_features = 0; // reusing this variable - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - // Deal with inliers only - if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - - num_total_features++; - - // Find the pixel residuals - size_t residual_index = dense_map::getMapValue(pid_cid_fid_to_residual, pid, cid, fid); - if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; - - double res_x = residuals[residual_index + 0]; - double res_y = residuals[residual_index + 1]; - // NaN values will never be inliers if the comparison is set as below - bool is_good = (Eigen::Vector2d(res_x, res_y).norm() <= FLAGS_max_reprojection_error); - if (!is_good) { - num_outliers_reproj++; - dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); - } - } - } - std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj - << " outlier features using reprojection error, out of " << num_total_features - << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; - } - } + dense_map::evalResiduals("after opt", residual_names, residual_scales, problem, residuals); + + // Flag outliers after this pass + calc_world_to_cam_transforms + ( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + // Must have up-to-date world_to_cam and residuals to flag the outliers + dense_map::flagOutliersByTriAngleAndReprojErr( // Inputs + FLAGS_refiner_min_angle, FLAGS_max_reprojection_error, pid_to_cid_fid, keypoint_vec, + world_to_cam, xyz_vec, 
pid_cid_fid_to_residual_index, residuals, + // Outputs + pid_cid_fid_inlier); + } // End optimization passes + + if (FLAGS_verbose) + dense_map::saveInlierMatches(image_files, FLAGS_num_overlaps, pid_to_cid_fid, + keypoint_vec, pid_cid_fid_inlier); bool map_changed = (FLAGS_num_iterations > 0 && (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "")); if (map_changed) { std::cout << "Either the sparse map intrinsics or cameras got modified. " << "The map must be rebuilt." << std::endl; - dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data cam_params[ref_cam_type], sparse_map); } @@ -2712,9 +2279,8 @@ int main(int argc, char** argv) { } // Update the optimized depth to image (for haz cam only) - int cam_type = 1; // haz cam - haz_cam_depth_to_image_scale = depth_to_image_scales[cam_type]; - haz_cam_depth_to_image_transform = normalized_depth_to_image[cam_type]; + haz_cam_depth_to_image_scale = depth_to_image_scales[haz_cam_type]; + haz_cam_depth_to_image_transform = normalized_depth_to_image[haz_cam_type]; haz_cam_depth_to_image_transform.linear() *= haz_cam_depth_to_image_scale; // Update the config file @@ -2727,19 +2293,22 @@ int main(int argc, char** argv) { sparse_map->Save(FLAGS_output_map); if (FLAGS_out_texture_dir != "") { + // Project each image onto the mesh + if (FLAGS_mesh == "") LOG(FATAL) << "Cannot project camera images onto a mesh if a mesh was not provided.\n"; - // The transform from the world to every camera - std::vector world_to_cam; - calc_world_to_cam_transforms( // Inputs + // The transform from the world to every camera must be updated + // TODO(oalexan1): Why the call below works without dense_map:: prepended to it? 
+ dense_map::calc_world_to_cam_transforms( // Inputs cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, // Output world_to_cam); - meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, ref_cam_type, - FLAGS_nav_cam_num_exclude_boundary_pixels, FLAGS_out_texture_dir); + dense_map::meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, + ref_cam_type, FLAGS_nav_cam_num_exclude_boundary_pixels, + FLAGS_out_texture_dir); } return 0; -} // NOLINT +} diff --git a/dense_map/geometry_mapper/tools/orthoproject.cc b/dense_map/geometry_mapper/tools/orthoproject.cc index 85cf1a8e..3bc19528 100644 --- a/dense_map/geometry_mapper/tools/orthoproject.cc +++ b/dense_map/geometry_mapper/tools/orthoproject.cc @@ -28,8 +28,8 @@ #include +#include #include -#include #include diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index 6f9b770c..59db877c 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -148,9 +148,9 @@ class StreamingMapper { // The info for each face which allows one later to project an image onto // the face and compute the texture. 
- std::vector face_projection_info; + std::vector m_face_projection_info; - tex::Model texture_model; + IsaacObjModel texture_model; std::vector texture_atlases; cv::Mat model_texture; @@ -298,7 +298,7 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { m_smallest_cost_per_face = std::vector(num_faces, 1.0e+100); if (use_single_texture) - dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info, + dense_map::formModel(mesh, pixel_size, num_threads, m_face_projection_info, texture_atlases, texture_model); } catch (std::exception& e) { LOG(FATAL) << "Could not load mesh.\n" << e.what() << "\n"; @@ -436,8 +436,9 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", *img_ptr).toImageMsg(); } else { - dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, pixel_size, num_threads, - face_projection_info, texture_atlases, texture_model, model_texture); + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, + pixel_size, num_threads, m_face_projection_info, texture_atlases, + texture_model, model_texture); // Note that the output image has an alpha channel msg = cv_bridge::CvImage(std_msgs::Header(), "bgra8", model_texture).toImageMsg(); } @@ -713,7 +714,8 @@ void StreamingMapper::CompressedTextureCallback(const sensor_msgs::CompressedIma // Copy the data to local storage to ensure no strange behavior // cv::Mat tmp_img = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR); // cv::Mat tmp_img = - texture_cam_images[texture_cam_image_timestamp] = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR); + texture_cam_images[texture_cam_image_timestamp] + = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR); // cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE ); // Create a window for display. // cv::imshow( "Display window", tmp_img); // Show our image inside it. 
diff --git a/dense_map/geometry_mapper/tools/test_texture_gen.cc b/dense_map/geometry_mapper/tools/test_texture_gen.cc new file mode 100644 index 00000000..849f73fe --- /dev/null +++ b/dense_map/geometry_mapper/tools/test_texture_gen.cc @@ -0,0 +1,134 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +// Test creating a texture the way the streaming_mapper does it, but +// without involving ROS. Hence, create a single shared texture buffer +// which will be used for any image, then use it with one image. This +// is different than the orthoproject tool, which creates a texture +// buffer specific to each image with no reserve space. + +DEFINE_string(mesh, "", "The mesh to project onto."); + +DEFINE_string(image, "", "The image to orthoproject. It should be distorted, as extracted " + "from a bag."); + +DEFINE_string(camera_to_world, "", + "The camera to world file, as written by the geometry mapper. For example: " + "run/1616779788.2920296_nav_cam_to_world.txt."); + +DEFINE_string(camera_name, "", "The the camera name. 
For example: 'sci_cam'."); + +DEFINE_int32(num_exclude_boundary_pixels, 0, + "Exclude pixels closer to the boundary than this when texturing."); + +DEFINE_string(output_prefix, "", "The output prefix. The textured mesh name will be" + " .obj."); + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + + if (FLAGS_image.empty() || FLAGS_camera_to_world.empty()) + LOG(FATAL) << "Must specify an image and camera."; + + if (FLAGS_mesh.empty() || FLAGS_camera_name.empty() || FLAGS_output_prefix.empty()) + LOG(FATAL) << "Not all inputs were specified."; + + // Load the mesh + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + + // Load the camera intrinsics + config_reader::ConfigReader config; + config.AddFile("cameras.config"); + if (!config.ReadFiles()) LOG(FATAL) << "Could not read the configuration file."; + std::cout << "Using camera: " << FLAGS_camera_name << std::endl; + camera::CameraParameters cam_params(&config, FLAGS_camera_name.c_str()); + + // Read image and camera_to_world + cv::Mat image = cv::imread(FLAGS_image); + Eigen::Affine3d cam_to_world; + dense_map::readAffine(cam_to_world, FLAGS_camera_to_world); + + double pixel_size = 0.001; + int num_threads = 48; + std::vector const& faces = mesh->get_faces(); + int num_faces = faces.size(); + std::vector smallest_cost_per_face(num_faces, 1.0e+100); + std::vector face_projection_info; + dense_map::IsaacObjModel texture_model; + std::vector texture_atlases; + cv::Mat model_texture; + + // Form the model + dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info, + texture_atlases, texture_model); + + // Set up the camera + camera::CameraModel cam(cam_to_world.inverse(), cam_params); + + // Project a single image + dense_map::projectTexture(mesh, bvh_tree, image, cam, smallest_cost_per_face, + pixel_size, num_threads, 
face_projection_info, texture_atlases, + texture_model, model_texture); + std::string mtl_str; + dense_map::formMtl(FLAGS_output_prefix, mtl_str); + std::string obj_str; + dense_map::formObj(texture_model, FLAGS_output_prefix, obj_str); + + // Create the output directory, if needed + std::string out_dir = boost::filesystem::path(FLAGS_output_prefix).parent_path().string(); + if (out_dir != "") dense_map::createDir(out_dir); + + std::string obj_file = FLAGS_output_prefix + ".obj"; + std::cout << "Writing: " << obj_file << std::endl; + std::ofstream obj_handle(obj_file); + obj_handle << obj_str; + obj_handle.close(); + + std::string mtl_file = FLAGS_output_prefix + ".mtl"; + std::cout << "Writing: " << mtl_file << std::endl; + std::ofstream mtl_handle(mtl_file); + mtl_handle << mtl_str; + mtl_handle.close(); + + std::string png_file = FLAGS_output_prefix + ".png"; + std::cout << "Writing: " << png_file << std::endl; + cv::imwrite(png_file, model_texture); + + return 0; +} From 326af89000414ca982c32d89bbf786c9384226f1 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 2 Feb 2022 11:40:58 -0800 Subject: [PATCH 32/40] script to make panoramas out of images (#33) --- .../inspection/scripts/panorama_maker.sh | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 astrobee/behaviors/inspection/scripts/panorama_maker.sh diff --git a/astrobee/behaviors/inspection/scripts/panorama_maker.sh b/astrobee/behaviors/inspection/scripts/panorama_maker.sh new file mode 100755 index 00000000..b0aa2a68 --- /dev/null +++ b/astrobee/behaviors/inspection/scripts/panorama_maker.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# + +# generate pto file with images +pto_gen -o panorama.pto -f 62.0 *.jpg + +# generate control points +cpfind --multirow -o panorama.pto panorama.pto + +# set optimization variables, optimize +pto_var --opt y,p,r,b -o panorama.pto panorama.pto +autooptimiser -n -o panorama.pto panorama.pto + +# clean 
outliers +cpclean --max-distance=3 -o panorama.pto panorama.pto + +# optimize pohotometric parameters +pto_var --opt y,p,r,TrX,TrY,TrZ,b -o panorama.pto panorama.pto +autooptimiser -n -o panorama.pto panorama.pto + + +autooptimiser -m -o panorama.pto panorama.pto + +# configure image output +pano_modify -o panorama.pto --center --canvas=AUTO --projection=2 --fov=360x180 panorama.pto + +# generate panorama +hugin_executor --stitching --prefix=prefix panorama.pto \ No newline at end of file From d64d24114340a5982edea14477824eae44795fae Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 2 Feb 2022 13:42:10 -0800 Subject: [PATCH 33/40] making anomaly classes general (#28) * making anomaly classes general * Changed gs stuff to work with new isaac messages Co-authored-by: Katie Browne --- .../app/libs/isaac_hw_msgs-0.0.0.jar | Bin 5231 -> 101 bytes .../app/libs/isaac_msgs-0.0.0.jar | Bin 28759 -> 95 bytes .../isaac_gs_ros_bridge/RosPubSub.java | 20 +++---- .../inspection/src/inspection_node.cc | 8 +-- .../inspection/tools/inspection_tool.cc | 15 ++---- .../ros_gs_bridge/src/ros_gs_bridge.cc | 49 ++---------------- img_analysis/src/img_analysis_nodelet.cc | 17 +++--- isaac_msgs | 2 +- 8 files changed, 29 insertions(+), 82 deletions(-) mode change 100644 => 120000 apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar mode change 100644 => 120000 apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar deleted file mode 100644 index e04d1d37ec894a44af22b116e8de4c8784f0390a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5231 zcmaKw2Q*w=+lEK)8NHVfCBk4{B@%;-D5H0x1Y@F%45Et?A-WNS6up;7)aWrrAB5<= zL_!c{v>{NKONI{Tcn*7Mxwth3j>ue~2VO(J4S0Dznvpy3x~061-gXWeO| zI6bvhjG!QO9aV86fZk7mh>m{e-v|JJ-P4ot-+|gt9d%U|LnAS5Rp*3vI&BiPDhCNW 
zKz-)R3GDAEs-g_MhKs3aRhU#zx0GGvX^7M)9^iZUA0j$$TdIiWK*FSQr@J`i^@O!>I~&3n;96~|LUs->{=R4myf9FBlq z&9NQGlJ-%ONqtKEP+qz4?1 zuy*vZcJ^@a_+>;O?AKd1Ps5Li0RYZlq778lm5tyI9%2X-+{44m$XR#f2JPI=rxxDm zELN=cJXQN{w4*IwXI^V|tgpig0dHqKX3o-8{jG&hE$6}b(L8WVjm^PdMZZe@t(@GO ze#q|Fref0r1X>;eyo<{3#E(x9#r0))=_=MhY69-P7q827PyUuLsw61KZ82j!bW3al ztV@-K5aG6voxZyfu~E+9J>`KKMeGqf^V4@5-n`6_%8=F?J)%h4N#7IjvL;M5PQD4g z6|Inlb7&hhv!`F+!AJPt&RB0ONcHld@wZ!WUFk!_NuMYjD`rTRsTB`^@i9qVq9)ge zJ!!cuj+eFFF<<4brq5mL97AMze!6QHzzcG+U2$-v)*bjEVTSp#yjK2dr-HeApjEYz zV%kF2T529f850;OTI)Dp(vY>rrZEk0xq&Dhj8|sHfG2pz#(7s9lnfkGEb^f=GvKOv zrsyN3hbbn}iZaat1j~@lHJ`56X4XEkNJ;^YWn5NM^NI4HK;uJ{F_3O2X*zg_jhyIZ zq(1v6gD!>DA=evQRh(KwdBaQf5&{Z>?v9$HwZDaN;BJ(xqcKMrkcb_*Yvd5-0 z@=tqK7b}5JpmozuY9&Gf@CJ8&!(vvuMNQtX{2LG<1@I=_yQH|Hk!FV2yf;1DTu3i)S=jTj_HKr6R(cPwoNWKzI>t|3n6cHq?seuXO*7kUvFCD^rBefmxWMI z&6YDkmubD_HZblpu&IcwK+4UweJi4P0~MF8AuqHWf0*W_QAWOY#YA$WfhY0ZU}hq^ zM|YP(rWl`85(sm4={vO6+3K(hGbNYX9kMDl&>4*y&UX@PUCXBh)-Qo+h)Fb$ZHO9-O>HoR&WsKbubYCphp4#aQrW${NWR4BW>4dRa&mj$0!l?#HC#ADQ?nz z6+$Dh*9DRk2823|zA2^FWUy{)q@_Mbyrs7fQYEM);^g5;QO+Cw*ap*!*JlYxkrzTCJsXAv*mrq)-u^sMcotGv1WIk5$%#}H~NqbV7Ic` z_&qezu|I;8t{3cKS7yEBLL#(iyQ{KLRlV{PU-pC=9bG{9eZdP;u}1j_a6+<8*CanD zcz2-UH;k0>wk-z9Sfc~X2%xxb@qoT!5AakcummkmB_f5}@_KJz%_)QJ zRK4>(9}n3GBJ^3u447tKU3Y2ziqhfe2Z)Eta1z8tlqJ7hz8F!mTmFN&wzxH|=GNty zr*v8^RU%>v(ft*dK|O?sr_Yn~LhM}gZr7UC^51Mk36BiX=SFRR-fy4=QFf0d*Eu*l z{~ojHDwE1PY8z2%>U^bBkw&+3*zpJfdQqFk!sde(*-;blGW5P;rmLlCk~|o05u4=H zqVOthF80|oA=ihhCF|i;hN*St@9gYcnbyp?mID}?Kj#nvACb4M(0(A%XQkOf$Ob9O zWlE0Y=GR4Eyl-<0eIp4;)>hlQzfeX&I;?pFN_$Ltxz31FHNcwzkp}zH(li=0+b zc5wuCCGkBIVaw%MipDVx`vIE0F`Pry_mAxoK^wC)fJ+#;V?W`DoT-m&k%1D)z`ui}>C6FP0?x z6?Vrmu}wd0&=?zC=E+q)|GF;j`|F*^AB%=K1aKd_BZ)7x6nt=}I=;IGhhM*p6n(xQ zSx6#QVH^j_#Cz|zx=aB5X9e~r5V3!AsZUr3uClsGg`oIsl7Ggf>N2**0^suYmpVwF zmhBM&OmEn@admMy`|Dq@M^SA( zA0T_A*5Dd$qI?Exkp(KGeqwTFHW&i^vEaG4WL?_!xJSsGA3??8#!w@ZiAA+!Gcrfe znz;-%%Xz8dPpL%s7nL4SOn_y^cvB=P29PdEiktvECr;aCbqMTw zJ$vhnN=zI+anQiD-}np=EC*xF5xpRCii(~cqkHOAnsA)gh%}5t 
z$BA6JJni+h$>qctsS%TAXQn9s&atJk0IcXcnX;~$+a;@dXe3Anf`p&JPAunr%abxvDjbQx#kdmN8JcxWHNnR|n+o zzo0~_&sse%yN=!D+NW5Pm6auLI4);^ZUVghW@nwg!loW&`4jTA;$5gtp)@{Uzs9-% zqkOJfEUX;){0vIbr%)<*|F}ik1L{nvKKhXH!eWsm+!9Tvui%WW9Joto>D(WE;`_!fGY`jsUW5yLOu z*Ht?)Ll`S#QII%2+RgQ3v(i6kk!fnS%p3TC=x47$1k|WBPN|f6>J{$)$18t<>7C28 zDkBD+4l|kC6m$-u^1nn7{UR@hxjVY@b9m7uk^)ql2psmfh@y*EPO>UTyc>IG5V)d7 zg1Ko>&Gr1D=d#;UZ++SC%ZIgCf_YQl$&h8xw|Ww+?7cu4`C}>f`IZ(3@6MT|?sgh9 z{cra-n>S`Ho!auQ*(1BBw`PJ<-(Uut6!!~H?#zU5U?vS;e_bN+-ac;lq-N zIA-VKnfLNL`tSqXo5c5D@->MY;G1U|z%0`CHsT)hLfLFF_Az##lYkGw{>$!yA04>a z$RtdoUb$~jBJ22)Qr$|-T4&>`y=0p3^Db7elVHrGZ}ov6njY+0@WkawKsuWx+N4hs zG6%QOn!{=Z`rArw+LB74B2rXylTh*Zp^Q_P?cKwEpAlPl!Im(LS#eXTk>VTb;-_N~ z{lwBwiyD}$fFPrvvirv^t)g*zok`yytxLSixAJ;>J)UL{yoip})Hc^pPkxQxi_GYS zVuo>uQmNU8u4H9VFYoeB_bqvE%9rlYOV%hQbBIa{QRbu@QpN{G`NH*J=$+Rvl0<%g z;oh9>241##N-s|EaKrL2-gW^l(OZ$y`2txKm}^^~_WOLlb8d0@ONW zNjYZD^?Lx_jgPkUwHUy$4M&94JXs)QyO2jv&G6Ip?F&H6_6QG&?6vmUZr`$1)s&6g zs|p>8$JuS#jWXUt>m^M^wRKz3=8E2g;mgaj9 zY3jZ7&Z{dQ8%aSawc@?CSr~O7*$nYTe<~N#{R=ZC>+*HhlVp8IaLFLzIFUo*40MWZ^n2!RYO64Ywpi*XC~I zKh+?bCZdOgmD*sN#gaV$C{ntm*-v%yRSVT91})3g9{ql} zb``PyaO3rx4X=IMbj;U@)qD50OBN~|aEjlU4h+Q@6f4cU7ISemQyT2=^5qwh!5pY* z1TZ`z-DQG`434dBvAU0vg!+dDpr;W1Yq1CfuC159(;7wpuAm-@8s8*fcUX*s5oTjq zAWYu)iqC$zns1lzcn!>2b2W_VdS@wY5E17J0I>w)yhA@ixOE(;#A_)sX4)rsj+-a^ zsFQR8PS=2WDxmwTf9#za%IDnufm2s9?uSKV#hO9-F!GQHuy)pYpyYtOBi|j6O;-YS5lXlWN{!eK*-@^+n z=GF4J!~(ryw+Ga#BAntENZVOxO=4+zb?ehFrw2>KN3-Vhg2M8Wo2x?mc_4dZE&S3ooP4YRMG8eGO)pEdgUxmdmCGoS?zA7x~+-yfbVO=+2;!E6FK^ zncvgbFG~0vx)j>KJdP~VYj$Rf@-G-$;t2?nNv-x3uKDa_9AG_h5gufh2wHaxDU+3+F6C1ZRnvm;mFi``4 zAk}NA3!S|fA~6-F3uRQ*mIacj9S;+@rIQH>L0xIR`BpM-y!{|2iVHWnP436CC9uvV zLm+ItNE+1e7_5_b51E&u6zY35>IMmjB$*QdrUTd6ixp|}3UkD_26K7|qD7W+5)2Y3 zENbF-3;dzR@&Zyu9!f6x`VA(Bt_JW13H;o#$lwq*apLXlHOO$m5Tl! 
zr(Pi-WF-3E)uK~uoif5o3lf6N=_h0d>& z{}Iw5KQHvls`a+&EK&Ni Gr~d$g0Rb}r diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar new file mode 120000 index 00000000..ac4e1ee9 --- /dev/null +++ b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar @@ -0,0 +1 @@ +../../../../../devel/share/maven/org/ros/rosjava_messages/isaac_hw_msgs/0.0.0/isaac_hw_msgs-0.0.0.jar \ No newline at end of file diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar deleted file mode 100644 index f69d57841e396899a31e908019343cb8eb0415dd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28759 zcmZ^LV|1nKwsh=tY}+`0qxum%yhfa_} z4Raay(FEr4+Jk7cj2R+_=_HrqTeX(z72LObegSIw@{wz3w&)*I3gTy7^?Q-~h#J~V z$){3D6@z6g^wyG>br?+`Xmr-eX3DVmE!7I2^125@@=(hJPC^Epll`rX zsZh6*AT;U%zU89oaw&3K2R55m9#hlxtg#u>!en>WB2%=3UGdeTtUYj9VT;UEiyj>S+5 zmvX|(+12bNwA>MWqFwIA>^j9~2`w#rT$O&qfQsCX(AVRjOZGX>rpiLMu-d#6R^iJc z)~{4)I~?G<11EfGP)XrA#^|rVWAhhy{|h24;J3M>zP{mCYe!SZ|ANttl=&kF0OK6s zee&O5{bKB7Z0KZeYa{sgQ_S@Lhf~Ha3g7=u%KO~kh)rk1a5FvRHuyk zH@w(g@>7W?0w?63dj^=JNZMtg0d+Q_tJ)4{KPHDqQ63v#33wvdHAflGt}QU_0|IUP z(zoeFvyi7Z6HK@QW*fb0=G1~5$jtSM!x{V4uUSpvxweFA{W7AZp>?N@ntV#n6qatx z`x`75N?0uT9yh9Xxn;&(tu(DY;4Rwb+p%P#Wg$6Ta75W*o+}3*8x*MhPkPEQ;)*E2pW0e4{rMIiX%`Os=hV2F=iHsjbom<5}%^ z0skFx`JB;@U;vI?036AF$5Fz@(e6Ki6tmU0`d=6&{(><$2P>Uj+PFAM)M_k*R-G7K zAyJ4loe)aAlDefys5~y0(+)AFKLFH@GHACEEW9`2E%SBr!-((YZTAkohpCy(%FN7c znKbqM*)@f|cXlw2kA(yPqqeCzCL{%zV-+bnR%C+~I^PvFJCsR#^16%muq$s%1YLk; zPlMwm1TUGo4Zd!e)wDOFKVwABY!SJnLD3T^XD6NXBr-Y|Xg~<7{u{0I4&^CYA+-ic z1yjq)E9f|UQ#?^`Od~9~;ah;c!PSV0mTq!#I%Zz3ZF!UMQj38){v^i9SdIMOiQiPS3iqZIYuAAw$Sko z{X34z;znE<030Defq)o($5GYR%GuiZPc%i1jg1WS4K4pcG&>7Y^rvye~LFvFX z-&ZZD(K$0bX2_2FBv19im%~!UEAu6oF#`=rrV1Gbw47!j4~2=ycfV|l5sF@CTE6g@ z7J43hylwgd`D|N)^Pruo){526A~47T3N+F3K2!r*ill<(hBG<_YB61dnpDa-mJCIA 
z@RMNIri1p>gAl`I#WF>yh&H|>`wmxc@d_uB!#qKxxcZ=2k|GMM!MGPNtA~iV8Z#FX z*rqZR&KRduqMLl&Srpmeg~rd!H=AyM*}xO(<$W(!y?&ekVKhA0p%C!3@r2P-&$8Bi zZlh1-!pA#xQ>>fD8Te*S4HEo&Hkoz-fd{2;1DB4g4go_rD{}B)Cy`X(ftG`a)3mXb zyeO)flG!D+bwdE0?JEE{uLHkTVOWFDBM7ZRYi4_11+E_GPrG;>Onk~hcb*FP)Z}f! zQ8HH)DLC~rL?J3l*PKb$nh#Ag%9S-*cQS$*xHrKgd7(QJSyfp_^}!kXxH|v&uq(tt z!Wr9H;0REwK0RVpCy#}H*XyN58&tR1#XacnIM<4wnMD9_z5>)M?e92CSnHeq7tH_3 zzQGAAlKldxAs_Zf$PeBrNEoX)j@w0UNLJ;*8R9tLHDisRn8;*ES?=t;5E;CH2qZIQ zH8oM$7@r!v4sbY4`S~|DfHU9UoVrV+B_q)u=ifeg#N?6?0>N@SMN{bzu4D( zDj%OgFHx^pqpl+6mX?zJ7P0@99TY)!q>3T}fhYb% zR?O3S_^succk*3w~ zSZtRFQZZ5=YPT&+*b)rZHvkF-d5&%vI-K)KftK}YiJ1e7EGToUiXr~SF9FIT6NELC zP{I^BL60HCHKCadRug(F^tEy4FADxq!MP9$YO(&bk#x4|Jl;ansQF+q?@;Evl0%LM z?;>xFxe|7m_-S`ZRVlCku||z`A)bUVRo+r@y@fH@{4P^T%n{^RJ)iMj_lu2W69$Xl zn8w6W=5WBI2(a^_X={$x}-9_*T}{ph0Gb#SzPgZI46H+dT2%&yvJw z>gH@OKYVUJ`x5wk0Qz(Nd;ts7(V|a50K-nUDIgbYIi~;M zD|#+^w?mueNE0=EcSRag58e{od2quTC1U#8W1ml#{*)%g7TEW-Mxk3v=aJ8F!xooNDr%r>EK(BPo z`YGyri|MJl@2Yzi(Alou#RgoT>%mdfAv^RN!sg0%);N31O*Ig=^i(t^TAq~_AZ$pi zwyOj|V{$*OJ=$z_Tl)vJw)ty?sVr-3V}!YKTuKepuwUZuEaA8j96N*9ZUF)96Lr$< z@wj~*A$gQPeYNj>dqnu10HQRJ^}+#%Wf4FC)W08=|1SajyK4t0jLQO4b;!q+qUQ0s zM;SJYF1j-bY|0QgRbzrWV+471dR-P_NPzyd+Ley>BM|>Ld(3h+U5PDIlkOY3$+VQE z5>p_^<>}h;ylVYuz50`{S*u{M6fLLzSV)neyS!5UoSdYoHsp09>X|zAhuJB0lJaMo zF5~uin^!-X7on+vr((EHZocW&)wvSa`?wN<=%Z%LDk72OFFrz?>YyjM^bwcH$)_MM zj`soT|0FHkhZ6GyEs8}C*ww&(SjBo$yoP+^Dr?Bssd{ND z@uhKCOMd{G{k!}x$c~(cZ+Z`|yJ2LhI5IsQ-AM<(cWzMnAXdyQVrYEV!+4L<{lAKT zxFK_K0iBJ(tH!$HKJ91?-8#!1&!TQ`bx0nw)l=PpdhE`_|0Wlijn!w~Pc3v{Z= zF9M9@oAysth9HU8-$^evG56MmpXS;FW`j&oKj1mpdJcuLQG15j3m>w z$gB|}z=^B`ktw24fs4u`(pZpL_i4GNsNPz7A41+1L`+^2kB6PXAeUwF@i_jPI8FF? 
zeOcNJic`AwisfA)k$V_vfpEu!NFiAx+=S+198fMT&^&EoSN2QPkTI>t zGtZ6Gzxa@|1ZhpD;6|8XRRLF6LJTrNTD#7j4<%O&Wo9LcX~eJ`Jx$zCD$IRAG2AM` zY4Qu!$^eiXjxvjNDl-uigi~zH%$yWFS1AR8ZkNA|s(=tjoCyrf=rnk{jvlz5cZ_~u{ zTU0jdcC6yaK|2o7hW|X_f#DZ0gZfFD0QUoEBdvS4WxER{(=!psyNhGka1I`K4bm1a z`QUzxen^2p`T?09w`B<;#6TbOUc)tdp0o`ec^rn{ki!^^)?7mTf$i%hl#?*Va(OLn z2I8E5W_Xa4&SZI-LAISrbKDoXucX22fxk|wKpNZG@?1-YY;qu>VhZ^A> zt@+P{Hk}|`5YAsZgZmS)tH9x34A;65yg^U9ZUV0cU~FRZVCDVAV$jkTFHlUqC~$OA zVNP3@Zx{7;c;-4%36H)uDVx$7gUI#aWes&a6<~6)GggEqA8Foi>55F>B(qX8;jYJP z1@c$-z7Nz}ZGOjTuh3bU{7_3iSB)v-vh&E>hYv0G44q!XzUMt5?)-jQS`zb_2{Lc@nPiPx&8JSFU(U&V687hJbs%mAx!H2M@m>8%lR<}Y zQ(38GIXm@{Li05fgxjmmteHcllf?svCou4`nkq2}rV-Fr@8BhZk)=JNci$ieM1&Fi zmI0=4M!*Y%FleWmj7NrdNm}sM z&G(Wuj*s{DHkIRxB$ORDL|$f{@J#+RMtG9)KGF4LYY3Fx(q2c;1qr zPK{jw4A0UmVgb?Sy3#C{S-s0+;~MD|4RWBmDGa>p20s`sNDbO?a?+$9SEaPZ*YLIx zJmdKm;>(yG0;UYs?nqd{7IJhvQaVyaP9w>XOuFDs8Wwe)rMaA0b=G-F_D4c6;C zv^p{q*{_s%xWcj|UXg-t(Xl>5b8FYliCL@G1l*~CYo{NuCnPEiqXS`Cmxv^5RGF8g z{4!?C^9B$r`w6o%LR-cDg815BOL@G)c_941fE9}ujv5Yrkrrk#P@MHkGmR`!crql& z-v4aYG41Tl>9{sII!IFc-0f-MdX~MvX3crjW~%4hYjGs%calvhtM&j22Qp;UXIO2x z8mgKw364+c?+W&!o2zA;BqA%~&2bSPrZlpjW1cjHC&7kXu~Yj_=>_p4y)-|#5AoWrkz{lL4F`jVpIXx_ z3l1xgwa_9V45~FKE>@kEDV-LdCEl}n8knCuG7ADOHSQ4fQwC(I@*KJ6;4j@bs9zEg zNLj*}lq00V(BHDQz(GCP49Y<8`zga` zF5D4Z2b38h=6Tr{FI(#7Q=Ku=mgl57MPYY|EUurWejuX?~rz2H3YAUStjO+G+ z^{2;|QY*?L%9y3e4l9?nkwhzXAG^i7w#1>OgUHpYN#8zs4m^kD+kp8dxBlw0jfIO^ z2pBYy5@75(fF0~@&pNP$#(OEoMJj0Hez=Gnhik`eq7~v#(1>j^NMugaT$fG$z+g@0 z25&q3#@lx6JK6QsS8G+f*dtlBg7XpH0rF!>Jn1w6Mq~Ic|i_#!slG zJ&o5@qK7qNrF4_$Omf&}scfFT4UrPh(}&kQf?JnSIkVi0Xd%|E#^z~Gd-1Z1md*5z zz=uh~!;BZaP%DJ??ZZmTQaWWz_gz?oh1Z=8hcFzLb#!ydhBD#ia>$S(cJ(5QlPz3$ zqDm1p^nlg1azZJU&9H|kG-|^cj0mnh`@1bx6Jle{YI;SqijeKl^kS|IgBZ_a^jC>H z6f?sO_(37j(^QHuA3ZiXFqz|GuTSm+Xhk!cH=4iV-ZFL|5+7)1KLHc%hM<=BvivX- zO%xsbmd|9FStVC;YYM)xjKB?i8$gr^j`!jA3fDQq$a!?%4r;r6)^kP330ecGyT%G` z^Q>gnk%SrON0o6EWl3KT(vbmuFG^{o)T7O&WJgTTRQ}Dl9YWdP zsLcq5>Wj2$?!iQrABW6L{%nE;k>CUzj>mkffG>Wc8$D|*s@@CXg!urwGoq>(&5(S8 
zAN(+NG$iXu zs9Bi>;l7YMzZ*PFo!_@=LKJNDh&*Ukp8jT#7sNVk&?!P>^K83x{Vl2L1Cfp3cZ0Ah><&bzgB-Q_oTJf`>PdazL7 z$m`*NapxoR7$gy=0DE$$a3wI-WH@8Z)UXs}t;|6-d4u8<<#e}_;tj9*>6S5PGy;<@ z=$?QXxG`NQAN7!AjAwoLji>(2z!^mAt{Vu@8jjlzA}~-h$gUVXw!#>Dh~dk{3ulX- zT6KVHU{orQj{Av0a%TJ^vzDuCPpF-|h-)ENjf*8KvU0gw0=p`TcoD5Dqsr!Zx zc4`|J*FLe&yVnrQLW?F2fmmIoMI4%)6bYHV$RA6iB!USg7a^cyWdOelE7FKniB zh|Ur`mu~TAA2<#n#eKU4FG$`7A>I&=7)A@U9&6YSz3X)x0HOp(=<*f4=+|rZtKJmB zsG%o6k=z;dUfP=mwbuHW@9$*6aZ4*I39zD zfBij*W{Z(3O}0}Piq2)c4t+outYR^png~mUitq>YQu|xY4*(dj48_6ZJ4EC#`&}266HLS!W-80E%3_tI5J>=C}e9_kx*zk6DR>DqnMT-)$3uHp>JPMV2B#rIG`Si~>(d0RMUD zV;qpe-K9fk;W2;bwR$H#jITSp7b5s-k)RUYbW;@ALgj0KKg@`_FO1sfvtHCDn{^6m&|&Qi7C z`#$jl{|<%ur-x*7z=UBGAQcIJhvHxBo!@3r$})0*SyUDo5dj2DQ661m&+IpLuEt`! za_UdXXN}3oJQ2dAvNH*^$qH5^HA-c^RiW>G?}Q#Cc)qNu&Akb1J`J`TF|OHfH}@|H zK%D+2Sf9o(i{WgcX%Suybh3I(ULw4WuY;aqtizN0c3(HCH1Mit4jxtRqu4p@On7r4tpc&E3hi#6tfhfckdEqOxGk z6B5)@u3w&9);wipm^;k&9jX%Nmw-9Ws(ceS-k@}$jh$zx7$1Fg9fNG{v>A3P%9D1T zJ}YEt_?M($*=hKlQ=5Vn7q_h|%2)Q=Ad)>!Td|3bUxr?DD)zD6OKyI6<7#vK#ff18 z@J%0%ZYuU-D;BTgVaB99sUajA4iv5CGP_6wA~stk#uvNDAUxH2R^OVhFampqvMQbJ zgDt_#5V;*Np;qol@Sm-SJ++~J*|jB#3fUarvbL3)D9jzt#S~#x-GG*7-c?Ptmp!23 zqFdipeo~PxFTH9!THk80D8nbWo@V;)i61yF~k z%ELoa$z>87kG@+O%pHqJvZZ8cGy2i7fW+jpv^l5-!eIq}iu7*Py#;>ewPr9x2#c^z z!ZLZvdgFYlJD~lH-|Y?QXIZ+p&5U4sa~ad4I@NAn4)zJ>;gkkNY{GHC1+z8#CcY>8 z)RWgpciFfgT*preq=QeS$}+^nyja1H*&k1X*{Eqo9u4HnxNONBBpka|c{vB2%IHpn z0O98ONd2mfm^hRbN0F>2dm2$~4VF$>9u?opyz{7hKBSRq96+Yi^h2`X`KATa=Mx8tfrA?qnLvBzk{hm=mw4t5Hf)JLT$ETapMb z5ohgpHB>T`)pt_NWoOqQSlkg>yVn((3lc7WloS^B!|1mD!FgJXl%t9FF1{YbJ5Vt= zFZy;}Ge~@#aKjA8U(5OYI1<0|zu4|XQj@buM;;gFFW63%&DTxHmFG2+RsvPENM}hx zB<-2;9M$`bt_e|vSx@tL9(vduK4;74*L~(@q09|E!G=%xA!VoF)_&ifr`GhP>o82_ z7$KbVDEV$yTszA&#?w3zeb5odw`mo7?q;3_uk{|K!f|bUO%848ioWe(_C(c2N5jmw-xF^=ZDWVjTOAuXZ;3l=yEYap}B6{Vy_4(Np0_w*M{9x6K&IJ}2)a}`QXHfo6 z%X;S;3z9Mw$4?`?f_lvptXd2F^Xf&WQhCu>>E>pG>4V1}g%3D)tP9h(#5TxS@40zV zt3?j^U|_SVGILJjeP&}8KO*weOht9*>N~5_iV`exjmd2-F3x-!$H`c@1FDk8(UcZG 
zB_Pav`VRH8nx&!`-P%OmSSK<(@ZvKQ6FtJoPgojlr!^vfD|WS8P&F=_M z8Eba;T(hsZQ@dwfm?B(g2Re=`A2$;V(6l?vFF!9X89C2h7$PE@vG%rKpf~U-;>Lbq zt_E3UOw^0pp@)Od9KEa$7-}m*H04jbjq9tk3MMEp=e%9;@B?wduOiAaI{L zT)PM*zt|LB%L*mqI5O?4zxhOT*o^JcKD7cuvmn3qv`w*Y}s{l1D-T3nfb*^ zbz4lJSI5y_9m$u8WKT0!e0V#~&Jt{hM}CLgRV33tjfFST zO}1|daZ1V!_$oiBhNPQlJ0mQGv@_~4Imnho7i;!AOQEkTWwxjA^8?u{(3LQ>QffA7 zElezsolfBJ)iY_Y9Lg(P{Z)G4!PPT}?$8Oh)xd|eWM$LSA3G8AFopSV0MU34Xa#h? zSKglswExbe{A=}nnMeQ%4q~0>n+^p$3c;YBPfxp3n2d}=W<#nvLyCn(GAx?SRMYQS zy9MkM8djb<-DppWzwY7z4NE=fk-e6!xuM3nZ^ zM7DH4XDi`yyR-q32JH*&m>V=C7*1fwy3^m8hM*d-eZTRqnbGc&wQ3bD!MF(I#E2&E zN=4rW1D0#vK<8iF#pRgOpaU+bgQAU62NXCbMpgUrU)|6-Onv8d1rK_4=yx(F6q&SsCZPr0UbUV>gUs+7YSu{E)2%LlC&9P+~ur# ze+_>1)C2FYh)SmHH~J7BM;uXpa*u++N866FX@kxc`g98_M8wKcQqDsq!0D3C{otd=&miZ@X;fjYj#%ZLw(%AL0I-juYHZDn*h6{VgqN}b2atEQ#@ zbexjc5wyinth&EbcG<+G=XB1%<-RNAx@o18F1QxQKBG_~?+0tfsZ>rG87>~P#)QtV z{qyBbm%jV)jZwvqBfivGQvnfAG&U>qniDU{uX!NCyF&=PRdwNM(Af)a=aAWAMWX6G z#JRi3$-^x=SWBv(|LBAt$(=2M0Ki%SfRX+mfc;a5|DOLiCv3|0F=7tNsbEfg1i(ZT z58<@hW9VWtpmR8dTR{|us}%~dm<}aZ&OKZ@cU}T}Q5r7grBceQ&fULwAM7*|WdES^k5nRGDW6f6&Mn<%;=`Bv>w0ESfP?blh>X9 zuD<~90E?amn#mzvveG7=C1M-|3SvZ2dn%oB1JU_4U%yEmeTTH#5iAteB4`|AePpq1 zVZrhQwx;J__p6SzKjky9ksAvE+esX>s6m$M_Um$|`2h~M>FlZf zarl-d@(Kp@e`uSa_72gSUu;V(LYpN(h^9l4M6qNK(K3_&9Vt?y>^))PQ~M`Ubk93} zv_YV7v-m)7y>y<@Z9QYNd7>~^^h@Mj$mw2v4iG6aD2bK4^(6sBiu5@b55V*N08x{G z%t!}o18A}~hH-*spr;P`C+F(?PEwQsW&`ENS)25gxDjy&W;VNcQ4ZTJO~qqPpOV-^7w$q=y2O>ZS8 zk+qO9modH6@j}Dk_d_toEk+bqPM7}B?Y=Q~nDp}Y^#W<_5d=;Gae-YXQ9t}UWa4oX z+{N@)#LwlgkO^6@16_EqQC+9$j30&qk^1T4S;#JT_imfWDatnxdRtD<9jac@yGp7( zr4|MdeG`JDB2w}JlRFGMmmCT1gi(J&PG#i9#J^xP6xF2wgiK?#NH^eR*0WodQW=qLT+J*4N3rQIkwxJL=ku0#P5zc*+D8UKU+X5`G*qjU)9u|#OM zScVAs4Ds0>X%zjCx;^s=;)bbUJzx$k4dl+8SsL$|b}^l)61tjmR(#n~=L65BX3G}3 z&FVSPHA{u@B2{ZnE^e%y(pAQK`)O0Wq3_m3LZM6rT!4TH1!1a^rS7_Ge1Cw)WCaQ* zr1P_UrUwI2j?&uzajpaOIJa7Dc|#Oi*2(g8TR5GHNNts+tIr?x2t}$skPCpa9sniP z@Add+fBcTFw>|>JZrV;DU_iQ_g~W?m5qhSVwE;$rsYm&dRR28OFr% 
zP)lAp@gIR-i8c^jptS@u$X=c^J*{+dP6)mO9Ftv;n)PY(Rem&I-xz5dCPdKLd-Nmq zPwmoqJQd^OS;Ib$8u@nQa_mSidY)ynN|{%}JtR3dT<6P#>GwS>#4O}6jvse~R`)wT zpAR)$c09vP&55nxras?39TpTpZcNw13sEF3lp-gQvOXZR65Z0xtR6rb2Cz{7+E)(I zuxVY|CC-^5ReO%iRMp56qN<@EyuHzS0TSYv6OyK2$+CDLqS8M(Z@Su#9z|AHD7Mis z<4{VXO&VEGo7XIy#d{48%F%JD$DD}OK&7xkeBteM>+ifF<%iGTnvep?2{yg*b4R-I zSR`{XRrg*!(p?I!rE$+Hn^^qCsdL}LKm5(kX{6CPZ)1tRA#@npQ_b?D;Y0WAMf1g0 z?)>|M-2y8HJWGapFTSP;S>@_`uNSdX@+Ck9S{RKRwEeD&-=zY;}s!!6QzliHVBoOknY&csR zzCa;!pusHxxAp-4q)?@R88;k+@BYkPg_UebaVZr`VYJ)$ZuvpTFkv9=M^(gH3FIDa_??>l{00DvASH3|VUjyGcrrW=D_!vq*QN-99 zG;bG(&9A)V;DcDq>LbR=l!$2q?GCUL} z4Vv;r`v&e}!?1%CT}l~sYj0Tydx4F9EeneYBB&ZYOdjKBCACVxw0sy`5jk75cD_Rr zD78T7l}!okK`HBkpWsJWs;zb0s_!w1&!J z@K!>QZIDKbcMGX^s})sw@OIFzpqdsA(MHNGlck8a%IGrR>$e{!a|g24n=utz=5hVq61ZiSE`UC zv}S+aryQn;i9sYI{b3$4E1a@Bn_vgCtE!aplaH-sf0F9Q17Rt5b1xf*UNhS&SHxQA zk5wsRt5SaroPF6fiN;CSL!XAF~@WZn;g`F|OB`{*g77%&e-+b~2MCXIqfz0E%wU6Y_5S=+% zIn_TeK+l})XOx@^Ez3fqkdd%g97&%a{MbaQMP= zPqtQYGkgnr6?QiFyfyhu^IXi)#p8R;@q5h%1k9rN0cv&;{J*5qA91dKOC@V%E!)3W zWi6wb&LFajMoy&ffMeF?`Xx{Q(qd{b|dQ|k+sME}J4RPYpg*(qRXZoM@K3gmk zRTX~H?1gY1h~0de3_jTWcz+K?yk>dQPr*G+uyq}E#c%UX(tYQ;9@%Yhy-j}xM{CQ$ z929w9B|(5xG!qU^o6R@z-0LBEQR|=KjOt^JD&V{B64O+`-FCk)f^36com2R>>N-Wz z!NqdaZh*gX-Orwqn)}YUk=Vxm@1sLr`yC&rlwG;4+7=5@(M3smP6& zQpIf9cT*e{OT=QALwt-;L=j4yv7y_K*A!6nvyV=w(d2^M*nMuuIl_p7jJ41~2yIq? z8L$H?uw(O9fR>CHW88w~m6v&LqVx8`)%aHrJO2!;PvLnX=A21OrR7F_w|*A#m5R~@ z!C@=d6A4YRgsCYbrP2AK)N!ycSi}}Ox!?V$v@QEt;FSsOMOd1>A@seC>b{5o%f>ht zrnQ^gm{5oe0nIoJVJd^O!InvGVZ&rIU}(aZ;ioPXZAsh8@B(o^`V;@Bj? 
zS0%3?YFgTmOLIz`+h)*eTBy>gsBRGdATS=Rpeo-` z&&U;&NC=&X-w8jrxf_YvwgHhSfmpOn>F)*-$cf#qDr_0W=LYQ?G*b%+ml$lb?77Ca z8nMS7udXVN*^wdIlfW%_KXwT|)w7X^Z40zl4b6mGno8Z)y#JF`saMKI8`j+DnUeAp z|Mj$*xX-+95tGar<9cx&wd3hiP+2p)odi54E@gAf>plM3Z9deR&lz^jY0am(&Q<&U z;}?jS3^^AAwj5o}Rb_=dog3mai_r!$Z7twxi%AKsj^4nuv6`qXsk3{~O#eOncUSR9 z+^rA(?$143Zn0afWS(00re-ap?^SPHs|YQp4B{30@eHFRLUwNvye=mw*Yj0VBTF{Z zKD)J3AAh)039`+(X~6s$A8;A!kNn=>GiW6zeJA7p&F`Hk;&7lYLWK1qiaVNTY&gJl zEmzgGk@}M+PSOtfQ4F2*LWH<*M|Gk%&vrSJ?LIC%7a&ZR9HBUQ6ej&JD4EN;8rhsF zxxmH4J6rkw{hB zyQ4_KB?gC0JKyA7)5yIuSKBdtHN9@6?>&5O+RHd~_{=;opDS8Y2Ma^qd7>RngI&h9 zIn&Wt`^yY{qF9uRx>bB=o0rVUWD!Gq{x?jfaM+!YD@qQLksJ*pe3A47Nt5_AZ65@9 z1FO^@XH;Xvil2hMwwd@Vb3Y+uE#-?R;vW=qQBf7xE@C!X{d%@e4x^2f=uah@KxWb( zVej`jtp`0PQm2}0#j&`{kAUdk2~gx1?fp?Ga=r<6(zwZl;Vp>iDrHY8$s0%esZ{Mc z4Y6@ta(LA2$6FV~J|p%}=UQT%<4tW-U7M2MoS)PpFdf|%W9QI5|Ik+;YC;o*2{2-# z#!x~}+71Yp;R9d{=3?e3qE1DVQl(MhTUho;`%|fNm|S2Ov3cVoHgOu0rEf6@{xPCY zNVMH1Sw{K69_$YN?1qT$%DTaI2-<$bI_OYQMFe_+X5ZPzcIBl^VC;qr{PFcFTKy%t zL^RkEE1s4Z;a;}ph`{oKy7egi>(x?y^)7VKCANTD^9bBCAu#X##+usS6ZsgnM`>iq=RL_3FtNaxdp|(=?zdl&g#^Mq^Em$`A5{wGBkj`9am}`3+I8#*DGqKI~T+Md$bPl|ZM` zWA~J=2U$3x-JhB?Y53h(oQ!Z;=PkB*I~Xi!El8qdqEPbOdB4dDI{~@8fo!OrynySa zQakl;R74p)WN!H%t>!!yvSQBu0PHF720w8QtYi zY4-x7ma(R$%F;Z1FGfMhD?L>R4?o>et)tE*aCV9|T%2#8k@5dj4eU+QP#V{1H8JYg zDU$H4;Ig9w#>ZzQQ+GUAfPC4{{?LjW(BsMfWyuF!=L{R+*|r@?;MpA%2fDuzEY)M9 z$E1%@XGb`y%1S8h9M^EnB_iG_P9olvYDh*{f+btd1M4oCX?HLv8#A#Z>>Z++`&H z)K}oI=Kfpdv<*V8pIVXMi+&Zs?>;|&M(Ddm*d4fy__$^19XN2!QpqvAu;1y_;?Zw{soT06YbqJn^PP1i79U7Np{(oSdGDEOUca z>#HFS7$3wFE(68~DfY?a_BDjDEXkrhC8enp1w|B1(a??|nW5O|kqG+4%Jma@PkX)i&enHCXgfP{2l& z0hC#?m})jOjV#%eQnI=*eNesn88Nia`Rv7WewE*|p!k~LxMNLonKu%|aLh&KTHaj# zx%+f+v)oz1mC=#YNTNkQ<&cQ26Q*r*77SpXz$02i5eXOyMYVq}J;0VsRXM5WE@zP+ zMqMzyVEZOj{-YF>`m=%T0WZ1}`=!JIak3M8U1d3xs?*tl*zbB3p<;AD3IMP_z+?Uq z==#?z_J7UgxC20?g3ocgB#E(P*|c};S|wn0ozgy!81*VD_mxSzf04kiVMTNnAtD1t z2dX-ReU{Vt%UovZ$~Rid;nTC)H1dSS>*^d zlvpg5k05t&9)_$VX3}sIrr*7tQrg7B)evASFAR3~#g6YVDYNbxnfZw5EJkm1{f92$ 
zJTYEMTyI~bpp@I@h}YEJlDb1~-d7&1Qzi86!Nd2_;XBnhSi1zw*LvX1XxXqWkPO;D zJ9-zTF4cnL`qY0^&E_sv;~1PbUg3Jp-&R(N1H{Y#ncUSoE(U%bXFhMJW{lQIZg;N= z3p1H$&a_21l=0-5<`GDczjh%dd|BBZjB{7L9PG4VBY}B8Zc`rHR_7W;vDznxRk&qvb_ce))D23QK2B&J&%xV_(adsDiKi-Lj~ox(g#2#| z5~hjvv0}= znasL1qL^T)fy!RzDKVTtu7J`13M{~$x0Fx1ispsQ3d+G3!Vm+sUjssf1V=mdjm2+~_`FuX_*3tQ!cv+k(JpzUNB8 zud;oYI}RSH9h-iM8i+v>4yn({ToI!ny=KW@ZZ2=QeL97w-V|8X!FD5IKfixvlXc$Q zw1>w&v@mEE|@2lSebskM>frMbR{qNAOZ`vWK{8d|5Zldmr}x zb@~n9IeGFQhHuT?2V%;+9ctdt){k%$zJGrDL$N9$G{`yvYPbyGIsdrx`{$hKKTYR9 zPH&YJTi8qvc8RqtqaQe$|{DRudsbDdRM(kyqE95GFs4=eaRl1+Ym#l9L^In{Ew z(;~N&!& zM5HHxu5C+o4hhb+lz!+uhFolsD(FOtas-d_QaEh^4X$lm4>+8>K2u@r=-b z7pTeLD+v?^8nZ<~X*|r{Yz{O^K=fKLTpXw~s8@Ht$dTw^N>@S* zYLb}lY6NR_Yar*#@RsS#3}<@HS#gJt9*3(8UUMRmGk_DXP5T$v?;1r#xUvc+6RPVX z{m>(}v*T5-H1)$pW+3TOOk|Q4I%=PO-3%ysz-j!pRCXXA1$;ITr8bHIr#y>_z4dsTb#a2)&sOx=ky}CeWxhx5a$4v?lb>c$ z3GJZnCpIDLUI=~PY(cdZKq15;sz|=`4YPMq_BH2hZpFV1>aU9N6l!~K_DGG#Q&Bgb zDDSD)xOE3up8QA}VR~Nufh8?vZ+wb*{U*lwYXr2@_0m;ZW!Ntnjz=b2s4J2&-=xjuvhVE1 zN$&=f9Yo>R5pv}}5LQT|apHTQk+rh2LeR3=4cdd%Nhe$dS2K2*-t#=&erN{fqP6r& zlfrk-hCk5|?Wd9l4&elRLS5(h%dEUu{F)~dQeB-ZVUYDbz!`!I)B7vS-4*%*$ZbCSS_M@iCE?4rByz$@QrQ_ zn_paf*8e? zT0?XN4@dL~jh13hrw_7u>w*Pr+p;EuUMpvAjHZhv;OFwK8%A#I?bikz~ z?y6UcBLvadx>?#=yC<~DgC($UXS*G|IVPpbXIR;w3v#k%8EFj24>)AiVjQ2(SqtV= z)uW4VB}AvR_UWHF0}rg7FuW2%G3c6c-!q)LuT&}C^kqNh5-MWcX!Z5KOH=$Dj<}ZM zzqU$Wc&CP{doL*4)eDp$K5DR%NsxT3*Mt8KqPz`_s*m>}b%o;3d@LzDe1dE>aYue! 
zmDwHY!BgTYzd3cJGmqflfpF&|`}xZoWeI_Bo%pm1kl&4xTk^!zwg@u;h|I=|LSOgm zA7Q&sZgModfVRO7XdC#y>-&F6zkl}&W93c9d3MC)#Gc@SMpkeb+qFHiw6<(eZ%WYq zK*!ID2;u8oJo?w)W}fc746H`Q9b#UV+_v!EOJ&vgArpEHV>?$DS~jcsk4}7Z_*sVD z4W_tH?L+yFYB%{@4w$*Gp?q)3N=gp{B((%oD- zq@I-i1lcALIgm}v4lQSwl~n)iCPa&v zA|k!-o|L30q2inh3Ka@cfx>q^``Vh?_}i#&W}isSa&JO})EJrf0s?+++|VNxxTr-o zTWBG=7pKE+n_m#D7Sey@P139RoZT4I;)v+%Gs%x)N<&Hfp<0c2a6(?RMHj7;AT_(V zg6dHd%}tHRpOP%-^A+habLmsl=5dJzHMlMK?^7b-easG4x--!o%OXrjFBaI3ouu8Z z#FYRqBlM|bY{1W9FwnjUd`?NZSsOVyRPDAytezJbRVc)OUM}o=cZA34!A#pNMsyL8 z?9M#wC%C&=Mc|4p{7ftYX8%NOg}%3QAHF$JkI~OK>%MZsA5t%L_o{w!#L}~cNU)yQaXHFgyc_@(Ce-F3xh$Qb6QGgWR-Dto=~6U+lR&{R%kBqvzlAS zKjCY5G;G86ar2|I?tVs|4U*EQD*s6!3OO2(aT$d~AMlzhjVYQ!yc&h%RHn9&Bd+tV zG-Thnsa#5r;pv#i7M^JvmT47$uNR zE3jcPA|VnO=}G@;P;|g>Rfs#p&Lmf6inQHkwOjgYrHGP?t`E93q`$-E2Yn{>H>Qju zTvWP7YkA2dliLI5H9A?N(_FSA3W&G8o!49|-G&|FF+7qi!TcH9m_MhplFa;^+NT-} zr>SedYQT>2m{4lF1UM$uh5&it^EOwKfVALjTyYJ%qlIutb562jA`n^Db@ZIb4 z4!RM4HOi>=Ly<%hdkP)9344!9K3d>mfWXGkh|sBWhAMBUk4o~RJtAstwV^S+@uaB_ zm2TYU!>6Jb-sd1Svt#_Kk&u^B<>#i2C#KHM)?4qqE>4rM_ug(WM<<&oYtgEKnViN6 za-6`ApVei@m#q!hsva6Qf9gXAPrMgTi)4=>Tg$Nw*w8fo=%y)X|0HLVeezC;#cL;w zjllg6^W-`$yqZmfEmU8YImafA5ShUfYGnoUFLMqP_tchj@LkXHMun9tCpX4&az1A3 z@~x(D*4K>b6}A5ipHChQBWe@}TRJ9|4$)**u5Fga@s-+*Q+=SORU{-UYN5gcX9Y(3 zS>KgNPbAvS{V*3;to4>Uh8JH;7)MdLgQ-M`i7Zw42?;(J`Bh0^3^mT+116=u8g6B7{jyA8Z!O8H;ui7Z5d91rv!v!2Ov$;Z+d^1E}I%r%{q=m>|%7 zt%dspF-N5$U93#@+q9de+Z@(f2HP3G-GM0pXN>esnqHoK(%T=gmWCF#adcF$AP$NrvpEdn`|+5EHimI_v447P^%(o-_qv)dzbG)!2TQkCC{@f+Qr zf0y^I!&o_ucl1>aqXQnuio3jK{hNzp^NKzG=?f#x*Mcf3#wIyzO9MIlUHv(?>&#O# z{AW`_RU4xHWj7`t20x>xV+W05s(y5R5NmU1oq_MMLyGreI;G_(IrkJk=}5|a?Ciwz zckDf5xlY!V+7;u@Yvk-YN)|OiFOevD;l}H|^+}J5Zrzd%FdQNI#wJ43p+>Z;05RIw zq=M7SRgmJ~q}UV4HB~5pG(^hQWxxFUubMw^OlM6jJGK|b&pLCi^+OLZBQ7n)c;`>~E)BPKq z@E{O@RKfoKS&_|AsYyvN5#`jz(^mA{6}6V=Q>g75H@)VmFbV64C88fh^Zf97Y$;}; z)kIzyyB`hNr^`PQO`C*PL%cqH*ZbBMXh&?|Mx)904HCbE$nlL~tys%9`Q0p+Zrb$C zATj!?nibz(6{0|@6?45QTMwfP0^v8KmcDOz&KkHiHi(jrAlnJz*0VM~`B8MnubE~q 
z#@F0r;&2KVipCy9mRrgtf%iwYgZBe+7J~GWAOeW7? znIt(9cr`Wd%^-gVeMS@x?UO&M_u$*-d}K2%s5gCMVrD|9wduVNgRmiPCJ6s@2dU@XP@xvP7;bXbCIB#$?L|gIbayb&~H6P+D z#6661PBH=2_#CKYpJnTEo*QyVA`G$WnozpTi#_^4(!1!+X)-%-s7(oJ-CiP7P#SgH z$w4P)fJ-!eot)Jl9x#_+5K_OOToOF~vrukFImabQoKN8J%=_r4!Z4YB-;mMFG+)Z2 zXNU?xHSmp!I^jSFmuZT+n{4P!F zepK7s0V`%LhZQ9j4pQ@0L%a3HN39htMeQ9NlLvZWl#tp@|rDdk|CUI=U32sZ{ zVooAX{Q;RN{8Z>ug`%tv;R1e!3X(jyr0ypw8dJ~uONZ@NDSN0Ny6;zOTySXXC_mV=&X*Q)+Fs&z zDd?nbE2V_-!uji{e-1)e_i9)>ZH5^RO2+qJIg`R0&gVCBjsrT>nyJi0dZTD9-v$7B zdJ%~4Wril!k}O=#A@-tH)>6GO&S?ggPgr&1kfilqTPJ=G)J|TMv(Cbb*5X~NXPvXZ z$TQ7za+qUMl;2u(IrhDq%N%t`p5y)&r}rL4C_9b>q1odwE#t=yK(2C_XBt8w-GMUy z;9l4I)S-{Gn?m3^l@$-gsVo1xRau z+aBbYxayOg8+bdUKRD)*!1G_Bv?v60Z}o%Wmn&E+5o?Y&mBR;y7$m7eT0d=#PfE`8 zG-|>~nw*7cM;vnwGLEH4FrmUW+4cT57K~{g+;4pjP zD3R-0B;6N7RVU#~zft$20+G-NW%I|NIl@bRn zzKxS|Z#%5`TIZut-z!&?&D*?m@a?M zrT^^|Y)|&xaOn7^CaZRHHGwGMyHBwML3YRj{vEZeRKofcbte~Ifie2#$?3?MiF=(lv_N)Sa|77eo*>Br#|WyMA2X?AiwMxZm7;kP3V)xSvqNQr4zAvOxN|EXRsRtK70OpOqka;?`G@ zd`1bb8V8HAi)Uh>yFt$E`}lSSN$7%~S|Nm|+p`Allf*&$s~g?i&%*Za>dE;Iy?DNI zFDkCH^Gv8sd|~m1y5n?%t}B-;7I~dqdr#8`qb8qqQ#m}jFCL|(UH(t25b^HFK#=s+ zpV>`XSFTURI(DljuX7?fS2c4MIj^|ArU-3=>}ZZ`rHc0~39}~9wDl1%bL)Ng#BRe9 z-OjQ8Vc%kvywex-B zVzV3c4latMbCHtYFVMCHBMUo>+yxaIv^i!aD>E{|v&^(j4J;3<;;7@eXi z=pm+P`DaGdGx+`Fh=sSVt{yLuY`x_%EQrmChfrzx&JU~QmH2Cou)2aZNeSb%fc$7m zNSEC&pRJW74zKr_7^2!%l62{_w7*PlK=B8;$HcuN#F0I; z5z5e&QUIhg`F#^(imdTz?eM1XGRH~+^IWX!&(Y)1%hMtaB-cbBnw17)((Ee^boj?l z?kq>*S? 
z@BSu`Jc~z`{ZI1jWZ4IFv*;U9^gLirj5-5)21*8%K&g%$8A1@XIj((s_pOgBg*990 z6ul7)FU`bi?a>n=+g;!C98VCO3{QKVNu#)m>XAn0v@MLX0Zv?Uf8q3@ZM#c`;z3{y z&z%Gkiw^OlZpxlBWqSF5l)gGsx`p?mDP71A*3jmAq>^+C)uP&VT3=!?T)Rp4JT!Yl z14PuAzSZIBimpXcRHw5i6ELW3d0}I^?iSAO&fPBHMUY9TE>UG6NR$?Ks5Fq7=^Fph3ctgMbk4xtd~_dn?HQ<9zzAfp(B$2-V{wBhU-P0fk>qfLu7uG-0UqJF!KtB zMLmLzmMtVUPBk94Ggd1wnAnOPIYQcfC@}{uS7`24w&g{r9j?|bP>Nry(XIQmHX|jY z(vf|lj@1UY^4_kg-AA86vrl=LrX-?=Q8b!WlU8MrT;Z*SveCyL zKQSvzFc2qw={W{pQ6lyiuo5&iWCFe%G1CO_fvo9yVPewY<)}+xBBvK_(HH=Lm z0lS>v_%yo8hiXn+ow$p&%F7q)g>b>>jX`Tep>j?zh z0W>zF<4-7Wlo{IxIrMk&6N1UClJjaSvs~q`>ZfU|!I*N+tM`E{iGw}1kK5jHi&z`hl zrmw#A&*M%5IbW;N>(oirYVA8>a7lQ7P2}iTKAQA--MLhg|6d5xf$@y_hkB@hY!A*x-S|^icw$&89sQXw-8n^AJj{0 z8bNl&PveAB%pb1+>A)u)Q)eV~^FgiQ?_fjL(>J=o(>VGn$yl*E)%JeiNHdq&jAV#GO5Z1hBvA}UwMScpu;+02;e7mam zwM6Om*NwH!q&Ba{iyPcc*?t(v@5tOKyD*|5J>EqxUuKZUaH-QCo|?Q)IEXBAd`qIk zppx=ox8B;wuZVjCDJ|5PYCbot5MjC53_BTHiu1%38~?0bknA8KABCz+MfXTn&0NI6 zP4c0knV5yn*4x_?o}T)lj&H&ee$dh}eCr?K<7X(H@YmMY&FkBYkfEM+GmRz2pyMo` z;*0iQ9KL9Y=bE4iu9K>#s?;-;(Iw+b zR%_veShh9RmY}M$P>iBKGK%vz)iDL(rr>rB4~R*{wd>#?yP~T3%H$aG$ycs&@mKl- z%iQ)wL^J*rocy71mVgbX6mj~lvmE>TV76lQAMm7;OSD!U#`@SB_iy93)6380CPgpb z2k-CoTDNQsp9>>@zGJdsJGEXi6JnsDCFxsy*KC~$eE_7!qtW8>qL4FzOp|1@>>c7n z$WsNSZ%(rBOvK2raMjGtHus{bG>Pf9ohV%Iq5|t%(>Sz8m+RZ4%k^z(EF)x{l79fp z>cuZ9kv(GhTh*WB>uS8&jw$`6^uvr`09Eyf}M`)FFshq_Cw2m8BUSOcMPCTSW z014chJcF_4x%7};C}7X||D2EiZXx|HIm{#K{{+VYo9;6aaGHqXGvR=P?&Xw!sjOM;DVN8<+vM$?3jtm|AF9%=q}UEE5<%Rpj#b;O*Y*6HyRg(`v~Gn2jByzpKg4=2L+j>i zpb}6N9@>8|e2mBSZ3(v*fA9~3N^`H&pEIw}oVm9vw3fqD|JS6GVvlXDE`(Iw#oe7P zhtJs#1fQ}G=nc&(;5i)r5N$v8-8w{by_&^B65U<#j|Nm@SCW>{K z>h$iVrx1zfq*FoOU=7Nd(RgXk$EE_6H+pe7S z0ZaJx-@r(^yndZP@y~^QqAMl zK%&+a^uIm$cZJl(02I?_Mfl}>?DRQu!Q2c*{z34>ngA%E=eJ#(_2RpZgCskr9e~ zDtZkG^Volz-LSc7Q0Bb&HRivM`{(w-=G#D7I?~rz(8=-_Bg5{ELuo(duhA|?%ioO( zyUhz_c`E)V>%Y%n*bPT0PYm=Y-k%+l zccI{B&1+zoLHGM+!p4=Lw74gKqy62yu+a`ERbB5I^>V2Ef4l(fdK*flHMmCncUt^^ ksIY53DD}JHH7az1{2q%4gxY@v=IC!Y-2y}$jeh<0KiYwojQ{`u diff --git 
a/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar new file mode 120000 index 00000000..d8d21b57 --- /dev/null +++ b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar @@ -0,0 +1 @@ +../../../../../devel/share/maven/org/ros/rosjava_messages/isaac_msgs/0.0.0/isaac_msgs-0.0.0.jar \ No newline at end of file diff --git a/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java b/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java index 6c32d646..e4916a90 100644 --- a/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java +++ b/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java @@ -155,6 +155,7 @@ public boolean handleGuestScienceCustomCmd(String command) { if (cmd_split[0].compareTo("iir") == 0) { // This is an image inspection result + // TODO(Katie) Need to add anomaly_result once it is added to the ground node Log.i(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Got image inspection result"); ImageInspectionResult imageInspectionResultMsg = mImageInspectionResultPublisher.newMessage(); imageInspectionResultMsg.setResponse(Integer.parseInt(cmd_split[1])); @@ -170,7 +171,9 @@ public boolean handleGuestScienceCustomCmd(String command) { inspectionGoalMsg.setCommand(Byte.parseByte(cmd_split[1])); // Need to figure out how many poses we have, subtract 2 for gs command name // and inspection command arg - int num_args = cmd_split.length - 2; + // TODO(Katie) This extraction needs work. The inspection poses were changed from a + // TODO(Katie) list of poses to a pose array. 
+ /*int num_args = cmd_split.length - 2; int num_poses = num_args/7; if (num_args % 7 == 0) { List inspectPoses = inspectionGoalMsg.getInspectPoses(); @@ -192,7 +195,7 @@ public boolean handleGuestScienceCustomCmd(String command) { inspectPoses.add(poseStamped); } inspectionGoalMsg.setInspectPoses(inspectPoses); - } + }*/ mInspectionGoalPublisher.publish(inspectionGoalMsg); } else { Log.e(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Unknown custom guest science command."); @@ -235,20 +238,9 @@ public void inspectionFeedbackCallback(InspectionFeedback feedback) { public void inspectionResultCallback(InspectionResult result) { Log.i(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Got inspection result"); // TODO(Katie) Extract header timestamp and frame id to be thorough + // TODO(Katie) Update to work with new inspection result String gs_data = ""; - ChannelBuffer vent_result = result.getVentResult(); - gs_data += vent_result.capacity() + "@"; - for (int i = 0; i < vent_result.capacity(); i++) { - gs_data += Byte.toString(vent_result.getByte(i)) + "@"; - } - - ChannelBuffer geometry_result = result.getGeometryResult(); - gs_data += geometry_result.capacity() + "@"; - for (int i = 0; i < geometry_result.capacity(); i++) { - gs_data += Byte.toString(geometry_result.getByte(i)) + "@"; - } - gs_data += result.getResponse() + "@"; gs_data += result.getFsmResult() + "\0"; diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 12629e5d..3d602a3d 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -556,7 +556,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // The sci cam image was received if (sci_cam_req_) { sci_cam_req_ = false; - result_.geometry_result.push_back(isaac_msgs::InspectionResult::PIC_ACQUIRED); + result_.inspection_result.push_back(isaac_msgs::InspectionResult::PIC_ACQUIRED); 
result_.picture_time.push_back(msg->header.stamp); if (goal_.command == isaac_msgs::InspectionGoal::ANOMALY && ground_active_) { @@ -584,7 +584,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { ROS_DEBUG_STREAM("IResultCallback()"); // Fill in the result message with the result if (result != nullptr) - result_.vent_result.push_back(result->response); + result_.anomaly_result.push_back(result->anomaly_result); else ROS_ERROR_STREAM("Invalid result received Image Analysis"); return fsm_.Update(NEXT_INSPECT); @@ -662,8 +662,8 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } // Save new goal goal_ = *goal; - result_.vent_result.clear(); - result_.geometry_result.clear(); + result_.anomaly_result.clear(); + result_.inspection_result.clear(); result_.picture_time.clear(); goal_counter_ = 0; ROS_DEBUG_STREAM("RESET COUNTER"); diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index ac5ed55e..ef51b569 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -172,21 +172,14 @@ void ResultCallback(ff_util::FreeFlyerActionState::Enum code, case ff_util::FreeFlyerActionState::Enum::SUCCESS: std::cout << "[SUCCESS] "; if (FLAGS_anomaly) { - for (int i = 0; i < result->vent_result.size(); i++) { - switch (result->vent_result[i]) { - case isaac_msgs::InspectionResult::VENT_FREE: - std::cout << "Vent " << i <<" is free " << std::endl; break; - case isaac_msgs::InspectionResult::VENT_OBSTRUCTED: - std::cout << "Vent " << i <<" is obstructed " << std::endl; break; - case isaac_msgs::InspectionResult::INCONCLUSIVE: - std::cout << "Vent " << i <<" picture was inconclusive" << std::endl; break; - } + for (int i = 0; i < result->anomaly_result.size(); i++) { + std::cout << "Vent " << i <<" is " << result->anomaly_result[i].classifier_result << std::endl; } } if (FLAGS_geometry) { - for (int i = 0; i < 
result->geometry_result.size(); i++) { - switch (result->geometry_result[i]) { + for (int i = 0; i < result->inspection_result.size(); i++) { + switch (result->inspection_result[i]) { case isaac_msgs::InspectionResult::PIC_ACQUIRED: std::cout << "Pic " << i <<" was processed " << std::endl; break; } diff --git a/communications/ros_gs_bridge/src/ros_gs_bridge.cc b/communications/ros_gs_bridge/src/ros_gs_bridge.cc index 19b8847f..3e4deb87 100644 --- a/communications/ros_gs_bridge/src/ros_gs_bridge.cc +++ b/communications/ros_gs_bridge/src/ros_gs_bridge.cc @@ -165,55 +165,13 @@ void RosGsBridge::GuestScienceDataCallback( // TODO(Katie) } else if (data->topic == "ir") { // Data contains the inspection result + // TODO(Katie) Inspection result has been changed, need to fix this code isaac_msgs::InspectionResult result; - int vent_size, geometry_size, response; - - // Get vent array - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing vent size out of " << - "inspection result. Data string is " << data_str); - return; - } - - vent_size = std::stoi(data_str.substr(0, delim_index)); - data_str.erase(0, (delim_index + 1)); - for (int i = 0; i < vent_size; i++) { - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing vent array out of " << - "inspection result. Data string is " << data_str); - return; - } - result.vent_result.push_back(std::stoi(data_str.substr(0, delim_index))); - data_str.erase(0, (delim_index + 1)); - } - - // Get geometry array - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing geometry size out of " << - "inspection result. 
Data string is " << data_str); - return; - } - - geometry_size = std::stoi(data_str.substr(0, delim_index)); - data_str.erase(0, (delim_index + 1)); - for (int i = 0; i < geometry_size; i++) { - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing geometry array out of" << - " inspection result. Data string is " << data_str); - return; - } - result.geometry_result.push_back(std::stoi(data_str.substr(0, - delim_index))); - data_str.erase(0, (delim_index + 1)); - } + int response; // Get response - delim_index = data_str.find("@"); if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing response out of " << + ROS_ERROR_STREAM("ros gs bridge: Error parsing vent size out of " << "inspection result. Data string is " << data_str); return; } @@ -339,6 +297,7 @@ bool RosGsBridge::GrabControlService(std_srvs::Empty::Request& req, void RosGsBridge::ImageInspectionResultCallback( ff_util::FreeFlyerActionState::Enum const& state, isaac_msgs::ImageInspectionResultConstPtr const& result) { + // TODO(Katie) Need to add the anomaly_result // Time stamp in the result header is not used so we don't need to package it // TODO(Katie) to be thorough, it would be good to package up the timestamp // TODO(Katie) change string to be json diff --git a/img_analysis/src/img_analysis_nodelet.cc b/img_analysis/src/img_analysis_nodelet.cc index bb120467..c1cf1c94 100755 --- a/img_analysis/src/img_analysis_nodelet.cc +++ b/img_analysis/src/img_analysis_nodelet.cc @@ -127,22 +127,25 @@ class ImageAnalysisNode : public ff_util::FreeFlyerNodelet { // Result isaac_msgs::ImageInspectionResult result; + result.anomaly_result.classifier_options.push_back("Vent free"); + result.anomaly_result.classifier_options.push_back("Vent has obstacle"); + result.anomaly_result.classifier_options.push_back("Unknown result, is robot looking at vent?"); switch (classification) { case vent_analysis_.vent_free_: - 
result.result = "Vent free"; - result.response = RESPONSE::VENT_FREE; + result.anomaly_result.classifier_result = "Vent free"; + result.response = RESPONSE::SUCCESS; break; case vent_analysis_.vent_blocked_: - result.result = "Vent has obstacle"; - result.response = RESPONSE::VENT_OBSTRUCTED; + result.anomaly_result.classifier_result = "Vent has obstacle"; + result.response = RESPONSE::SUCCESS; break; case vent_analysis_.vent_unknown_: - result.result = "Unknown result, is robot looking at vent?"; - result.response = RESPONSE::INCONCLUSIVE; + result.anomaly_result.classifier_result = "Unknown result, is robot looking at vent?"; + result.response = RESPONSE::SUCCESS; break; default: - result.result = "Image analysis failed"; + result.anomaly_result.classifier_result = "Image analysis failed"; result.response = RESPONSE::FAILED; } goal_.type = isaac_msgs::ImageInspectionGoal::NONE; diff --git a/isaac_msgs b/isaac_msgs index 4e08b717..c5b40c62 160000 --- a/isaac_msgs +++ b/isaac_msgs @@ -1 +1 @@ -Subproject commit 4e08b717104ffafe3c725aa4715041effd3b9ebe +Subproject commit c5b40c6248ce52b2bcc0ed1033492bab72a3fcb1 From a63c1a4b0f157761de5e4d2c74636c3e7876c4cd Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Wed, 2 Feb 2022 15:34:56 -0800 Subject: [PATCH 34/40] Apk gh action (#16) * apk first commit * fix bug * add java version check * change to 18.04 * add yes to install * add ndk * update gradle version * update gradle version 3.2.1 * using older ndk app * changing back gradle version * adding symlink * adding debug options * adding ndk folder * adding android_ndk_home * taking out extra symlink * build apk with newly generated .jar files * making jar compile inside gh action * adding remote repo option * remove remote from base image * fix path of jar file copy * fix path 2 * fix apk path * make sure the messages are copied * making images local * checkout submodules * Update apk.yaml * Update build.sh * update 
msgs image name + make find command more robust --- .github/workflows/apk.yaml | 71 ++++++++++++++++++++++++ scripts/docker/astrobee_msgs.Dockerfile | 1 - scripts/docker/build_msgs_jar.Dockerfile | 39 +++++++++++++ 3 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/apk.yaml create mode 100644 scripts/docker/build_msgs_jar.Dockerfile diff --git a/.github/workflows/apk.yaml b/.github/workflows/apk.yaml new file mode 100644 index 00000000..40100b33 --- /dev/null +++ b/.github/workflows/apk.yaml @@ -0,0 +1,71 @@ +# Check for lint error and auto correct them + +name: Compile APK + +on: ['push', 'pull_request'] + +jobs: + install_dependencies: + runs-on: ubuntu-18.04 + + steps: + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ + + + - name: Build image isaac/astrobee:msgs-ubuntu16.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:astrobee-msgs-ubuntu16.04 + + - name: Build image isaac/isaac:msgs-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:msgs-ubuntu16.04 + + - name: Build image isaac/isaac:latest-msgs-jar-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/build_msgs_jar.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:latest-msgs-jar-ubuntu16.04 + + - name: Copy jar files + run: | + docker cp $(docker create --rm isaac/isaac:latest-msgs-jar-ubuntu16.04):/src/msgs/devel/share/maven/ . 
+ rm isaac/apks/isaac_gs_ros_bridge/app/libs/*msgs* + find maven -name *.jar -print0 | xargs -0 cp -t isaac/apks/isaac_gs_ros_bridge/app/libs + + - name: Install APK dependencies + run: | + sudo apt-get install -y libc6-dev-i386 lib32z1 openjdk-8-jdk + mkdir $HOME/android-sdk + cd $HOME/android-sdk + wget https://dl.google.com/android/repository/tools_r25.2.3-linux.zip + java -version + unzip tools_r25.2.3-linux.zip + tools/bin/sdkmanager --update + yes | tools/bin/sdkmanager "platforms;android-25" "build-tools;25.0.2" "extras;google;m2repository" "extras;android;m2repository" + wget https://dl.google.com/android/repository/android-ndk-r22b-linux-x86_64.zip + unzip android-ndk-r22b-linux-x86_64.zip + mv android-ndk-r22b ndk-bundle + cd ~/android-sdk/ndk-bundle/toolchains + ln -s aarch64-linux-android-4.9 mips64el-linux-android + ln -s arm-linux-androideabi-4.9 mipsel-linux-android + - name: Build APK + run: | + cd isaac/apks/isaac_gs_ros_bridge + ANDROID_HOME=$HOME/android-sdk ANDROID_NDK_HOME=$HOME/android-sdk/ndk-bundle ./gradlew build diff --git a/scripts/docker/astrobee_msgs.Dockerfile b/scripts/docker/astrobee_msgs.Dockerfile index f6c4dad9..33fb38f7 100644 --- a/scripts/docker/astrobee_msgs.Dockerfile +++ b/scripts/docker/astrobee_msgs.Dockerfile @@ -20,7 +20,6 @@ # You must set the docker context to be the repository root directory ARG UBUNTU_VERSION=16.04 - FROM ubuntu:${UBUNTU_VERSION} ARG ROS_VERSION=kinetic diff --git a/scripts/docker/build_msgs_jar.Dockerfile b/scripts/docker/build_msgs_jar.Dockerfile new file mode 100644 index 00000000..ab93ca6b --- /dev/null +++ b/scripts/docker/build_msgs_jar.Dockerfile @@ -0,0 +1,39 @@ +# Copyright (c) 2021, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. 
+# +# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking +# platform" software is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This will set up an Astrobee docker container using the non-NASA install instructions. +# You must set the docker context to be the repository root directory + +ARG REMOTE=isaac +FROM ${REMOTE}/isaac:msgs-ubuntu16.04 + +RUN apt-get update && apt-get install -y \ + unzip \ + libc6-dev-i386 \ + lib32z1 \ + python-wstool \ + openjdk-8-jdk \ + ros-kinetic-rosjava \ + && rm -rf /var/lib/apt/lists/* + +# Compile msg jar files, genjava_message_artifacts only works with bash +RUN ["/bin/bash", "-c", "cd /src/msgs \ + && catkin config \ + && catkin build \ + && . 
devel/setup.bash \ + && genjava_message_artifacts --verbose -p ff_msgs ff_hw_msgs isaac_msgs isaac_hw_msgs"] From 67ff81f920924d26b3553ef5a77eca11451172b6 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Tue, 8 Feb 2022 14:23:40 -0800 Subject: [PATCH 35/40] adding options to inspection tool --- .../inspection/src/inspection_node.cc | 23 +++- .../inspection/tools/inspection_tool.cc | 116 +++++++++++++++--- 2 files changed, 117 insertions(+), 22 deletions(-) diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 3d602a3d..1d92769e 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -178,7 +178,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } } // Inspection is over, return - Result(RESPONSE::SUCCESS); + Result(RESPONSE::SUCCESS, "Inspection Over"); return STATE::WAITING; }); // [5] @@ -192,7 +192,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { return STATE::VOL_INSPECTION; } // Inspection is over, return - Result(RESPONSE::SUCCESS); + Result(RESPONSE::SUCCESS, "Inspection Over"); Dock("DOCK"); return STATE::RETURN_INSPECTION; }); @@ -595,9 +595,13 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // A new arm action has been called void GoalCallback(isaac_msgs::InspectionGoalConstPtr const& goal) { if (!goal_.inspect_poses.poses.empty()) { + isaac_msgs::InspectionResult result; switch (goal->command) { // Pause command case isaac_msgs::InspectionGoal::PAUSE: + result.fsm_result = "Pausing"; + result.response = RESPONSE::SUCCESS; + server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); return fsm_.Update(GOAL_PAUSE); // Resume command case isaac_msgs::InspectionGoal::RESUME: @@ -605,25 +609,24 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Skip command case isaac_msgs::InspectionGoal::SKIP: { - isaac_msgs::InspectionResult result; if 
(!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size()) { // Skip the current pose goal_counter_++; - result.fsm_result = "Skipped pose"; result.response = RESPONSE::SUCCESS; server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); } else { result.fsm_result = "Nothing to skip"; result.response = RESPONSE::INVALID_COMMAND; server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } return; } // Repeat last executed step command case isaac_msgs::InspectionGoal::REPEAT: { - isaac_msgs::InspectionResult result; if (!goal_.inspect_poses.poses.empty() && goal_counter_ > 0) { // Go back to the last pose goal_counter_--; @@ -631,10 +634,12 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { result.fsm_result = "Will repeat last pose"; result.response = RESPONSE::SUCCESS; server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); } else { result.fsm_result = "Nothing to repeat"; result.response = RESPONSE::INVALID_COMMAND; server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } return; } @@ -654,8 +659,16 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { << goal_.inspect_poses.poses[i].orientation.w << "\n"; } myfile.close(); + result.fsm_result = "Saved"; + result.response = RESPONSE::SUCCESS; + server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); } else { NODELET_ERROR_STREAM("Nothing to save"); + result.fsm_result = "Nothing to save"; + result.response = RESPONSE::FAILED; + server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } return; } diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index ef51b569..b2e0a24e 100644 --- 
a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -41,6 +41,7 @@ #include // C++ STL includes +#include #include #include #include @@ -87,6 +88,7 @@ DEFINE_double(deadline, -1.0, "Action deadline timeout"); // Match the internal states and responses with the message definition using STATE = isaac_msgs::InspectionState; +bool stopflag_ = false; bool has_only_whitespace_or_comments(const std::string & str) { for (std::string::const_iterator it = str.begin(); it != str.end(); it++) { @@ -156,11 +158,11 @@ void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { // Inspection action feedback void FeedbackCallback(isaac_msgs::InspectionFeedbackConstPtr const& feedback) { - std::cout << "\r " + std::cout << "\r " << "\rFSM: " << feedback->state.fsm_event << " -> " << feedback->state.fsm_state << " (" << feedback->state.fsm_subevent - << " -> " << feedback->state.fsm_substate << ") " << std::flush; + << " -> " << feedback->state.fsm_substate << ")" << std::flush; } // Inspection action result @@ -205,18 +207,15 @@ void ResultCallback(ff_util::FreeFlyerActionState::Enum code, << " (Code " << result->response << ")" << std::endl; teardown: std::cout << std::endl; - // In all cases we must shutdown - ros::shutdown(); + // In the case we must shutdown + if (result->fsm_result == "Inspection Over") { + stopflag_ = true; + ros::shutdown(); + } } -// Ensure all clients are connected -void ConnectedCallback( - ff_util::FreeFlyerActionClient *client) { - // Check to see if connected - if (!client->IsConnected()) return; - // Print out a status message - std::cout << "\r " - << "\rState: CONNECTED" << std::flush; +// Send the inspection goal to the server +void SendGoal(ff_util::FreeFlyerActionClient *client) { // Prepare the goal isaac_msgs::InspectionGoal goal; std::string path = std::string(ros::package::getPath("inspection")); @@ -235,7 +234,7 @@ void ConnectedCallback( goal.command = 
isaac_msgs::InspectionGoal::ANOMALY; // Read file - std::cout << "Reading: " << FLAGS_anomaly_poses << std::endl; + // std::cout << "Reading: " << FLAGS_anomaly_poses << std::endl; ReadFile(path + FLAGS_anomaly_poses, goal); } else if (FLAGS_geometry) { @@ -243,7 +242,7 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::GEOMETRY; // Read file - std::cout << "Reading: " << FLAGS_geometry_poses << std::endl; + // std::cout << "Reading: " << FLAGS_geometry_poses << std::endl; ReadFile(path + FLAGS_geometry_poses, goal); } else if (FLAGS_panorama) { @@ -251,7 +250,7 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::PANORAMA; // Read file - std::cout << "Reading: " << FLAGS_panorama_poses << std::endl; + // std::cout << "Reading: " << FLAGS_panorama_poses << std::endl; ReadFile(path + FLAGS_panorama_poses, goal); } else if (FLAGS_volumetric) { @@ -259,13 +258,78 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::VOLUMETRIC; // Read file - std::cout << "Reading: " << FLAGS_volumetric_poses << std::endl; + // std::cout << "Reading: " << FLAGS_volumetric_poses << std::endl; ReadFile(path + FLAGS_volumetric_poses, goal); } client->SendGoal(goal); } +void GetInput(ff_util::FreeFlyerActionClient *client) { + while (!stopflag_ && ros::ok()) { + std::string line, val; + std::getline(std::cin, line); + try { + switch (std::stoi(line)) { + case 0: + std::cout << "Input: " << line << " - Exiting" << std::endl; + stopflag_ = true; + break; + case 1: + std::cout << "Input: " << line << " - Pausing" << std::endl; + FLAGS_pause = true; + SendGoal(client); + break; + case 2: + std::cout << "Input: " << line << " - Resuming" << std::endl; + FLAGS_pause = false; + FLAGS_resume = true; + SendGoal(client); + break; + case 3: + std::cout << "Input: " << line << " - Repeating" << std::endl; + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = true; + SendGoal(client); + break; + case 4: + std::cout << "Input: " << line << " 
- Skipping" << std::endl; + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = false; + FLAGS_skip = true; + SendGoal(client); + break; + case 5: + std::cout << "Input: " << line << " - Saving" << std::endl; + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = false; + FLAGS_skip = false; + FLAGS_save = true; + SendGoal(client); + break; + default: + std::cout << "Invalid option" << std::endl; + } + } catch (const std::invalid_argument&) { + continue; } + } + return; +} + +// Ensure all clients are connected +void ConnectedCallback( + ff_util::FreeFlyerActionClient *client) { + // Check to see if connected + if (!client->IsConnected()) return; + // Print out a status message + std::cout << "\r " + << "\rState: CONNECTED" << std::flush; + SendGoal(client); +} + // Main entry point for application int main(int argc, char *argv[]) { // Initialize a ros node @@ -320,8 +384,26 @@ int main(int argc, char *argv[]) { ros::shutdown(); } } + +std::cout << "\r " + << "Available actions:\n" + << "0) Exit \n" + << "1) Pause \n" + << "2) Resume \n" + << "3) Repeat \n" + << "4) Skip \n" + << "5) Save \n" + << "Specify the number of the command to publish and hit 'enter'.\n"<< std::endl; + + // Start input thread + boost::thread inp(GetInput, &client); // Synchronous mode - ros::spin(); + while (!stopflag_) { + ros::spinOnce(); + } + // Wait for thread to exit + inp.join(); + // Finish commandline flags google::ShutDownCommandLineFlags(); // Make for great success From 94496b9a4eaf0e973e004e8bd2002a4e3c2e2b62 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Tue, 8 Feb 2022 17:07:45 -0800 Subject: [PATCH 36/40] polishing output from feedback --- .../inspection/tools/inspection_tool.cc | 43 +++++++++++-------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index b2e0a24e..c1152db0 100644 --- 
a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -158,11 +158,13 @@ void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { // Inspection action feedback void FeedbackCallback(isaac_msgs::InspectionFeedbackConstPtr const& feedback) { + std::cout << "\b\b\b\b\b\b\b\b\b\b\b\b\b\b "; std::cout << "\r " << "\rFSM: " << feedback->state.fsm_event << " -> " << feedback->state.fsm_state << " (" << feedback->state.fsm_subevent - << " -> " << feedback->state.fsm_substate << ")" << std::flush; + << " -> " << feedback->state.fsm_substate << ")" + << " | Input: " << std::flush; } // Inspection action result @@ -173,9 +175,12 @@ void ResultCallback(ff_util::FreeFlyerActionState::Enum code, switch (code) { case ff_util::FreeFlyerActionState::Enum::SUCCESS: std::cout << "[SUCCESS] "; + // If we get there then we have some response data + std::cout << result->fsm_result << " (Code " << result->response << ")"; + if (FLAGS_anomaly) { for (int i = 0; i < result->anomaly_result.size(); i++) { - std::cout << "Vent " << i <<" is " << result->anomaly_result[i].classifier_result << std::endl; + std::cout << "\n\tVent " << i <<" is " << result->anomaly_result[i].classifier_result; } } @@ -190,7 +195,7 @@ void ResultCallback(ff_util::FreeFlyerActionState::Enum code, break; case ff_util::FreeFlyerActionState::Enum::PREEMPTED: - std::cout << "[PREEMPT] "; break; + std::cout << "[PREEMPT] "; break; case ff_util::FreeFlyerActionState::Enum::ABORTED: std::cout << "[ABORTED] "; break; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_CONNECT: @@ -202,16 +207,16 @@ void ResultCallback(ff_util::FreeFlyerActionState::Enum code, case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_DEADLINE: std::cout << "Action timed out on deadline"; goto teardown; } - // If we get there then we have some response data - std::cout << result->fsm_result - << " (Code " << result->response << ")" << std::endl; -teardown: - std::cout 
<< std::endl; - // In the case we must shutdown - if (result->fsm_result == "Inspection Over") { + // In the case we continue + if (result->fsm_result != "Inspection Over") { + std::cout << " | Input: " << std::flush; + return; + } + + teardown: + std::cout << std::endl; stopflag_ = true; ros::shutdown(); - } } // Send the inspection goal to the server @@ -269,49 +274,51 @@ void GetInput(ff_util::FreeFlyerActionClient *clie while (!stopflag_ && ros::ok()) { std::string line, val; std::getline(std::cin, line); + std::string clear_line; + clear_line.append(100, ' '); try { switch (std::stoi(line)) { case 0: - std::cout << "Input: " << line << " - Exiting" << std::endl; + std::cout << "\r Exiting" << clear_line << std::endl; stopflag_ = true; break; case 1: - std::cout << "Input: " << line << " - Pausing" << std::endl; FLAGS_pause = true; SendGoal(client); + std::cout << "\r Pausing" << clear_line << std::flush; break; case 2: - std::cout << "Input: " << line << " - Resuming" << std::endl; FLAGS_pause = false; FLAGS_resume = true; SendGoal(client); + std::cout << "\r Resuming" << clear_line << std::endl; break; case 3: - std::cout << "Input: " << line << " - Repeating" << std::endl; FLAGS_pause = false; FLAGS_resume = false; FLAGS_repeat = true; SendGoal(client); + std::cout << "\r Pausing and repeating pose (needs resume)" << clear_line << std::flush; break; case 4: - std::cout << "Input: " << line << " - Skipping" << std::endl; FLAGS_pause = false; FLAGS_resume = false; FLAGS_repeat = false; FLAGS_skip = true; SendGoal(client); + std::cout << "\r Pausing and skipping pose (needs resume)" << clear_line << std::flush; break; case 5: - std::cout << "Input: " << line << " - Saving" << std::endl; FLAGS_pause = false; FLAGS_resume = false; FLAGS_repeat = false; FLAGS_skip = false; FLAGS_save = true; SendGoal(client); + std::cout << "\r Pausing and saving (needs resume)" << clear_line << std::flush; break; default: - std::cout << "Invalid option" << std::endl; + std::cout 
<< "\r Invalid option" << clear_line << std::endl; } } catch (const std::invalid_argument&) { continue; } From f730984daf0e0af86b28977bb0a9420e429f4b17 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Wed, 9 Feb 2022 12:56:59 -0800 Subject: [PATCH 37/40] polising out from feedback 2 --- .../inspection/src/inspection_node.cc | 2 +- .../inspection/tools/inspection_tool.cc | 110 +++++++++++------- 2 files changed, 68 insertions(+), 44 deletions(-) diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 1d92769e..43e8417e 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -609,7 +609,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Skip command case isaac_msgs::InspectionGoal::SKIP: { - if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size()) { + if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size() - 1) { // Skip the current pose goal_counter_++; result.fsm_result = "Skipped pose"; diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index c1152db0..53f8716c 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -158,60 +158,66 @@ void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { // Inspection action feedback void FeedbackCallback(isaac_msgs::InspectionFeedbackConstPtr const& feedback) { - std::cout << "\b\b\b\b\b\b\b\b\b\b\b\b\b\b "; - std::cout << "\r " - << "\rFSM: " << feedback->state.fsm_event - << " -> " << feedback->state.fsm_state - << " (" << feedback->state.fsm_subevent - << " -> " << feedback->state.fsm_substate << ")" - << " | Input: " << std::flush; + std::string s; + s = feedback->state.fsm_event + + " -> " + feedback->state.fsm_state + + " (" + 
feedback->state.fsm_subevent + + " -> " + feedback->state.fsm_substate + ")"; + if (s.size() < 70) s.append(70 - s.size(), ' '); + std::cout << "\r" << s.substr(0, 70) << "|Input: " << std::flush; } // Inspection action result void ResultCallback(ff_util::FreeFlyerActionState::Enum code, isaac_msgs::InspectionResultConstPtr const& result) { - std::cout << std::endl << "Result: "; + std::string s; + s = "\nResult: "; // Print general response code switch (code) { case ff_util::FreeFlyerActionState::Enum::SUCCESS: - std::cout << "[SUCCESS] "; - // If we get there then we have some response data - std::cout << result->fsm_result << " (Code " << result->response << ")"; - - if (FLAGS_anomaly) { - for (int i = 0; i < result->anomaly_result.size(); i++) { - std::cout << "\n\tVent " << i <<" is " << result->anomaly_result[i].classifier_result; - } - } - - if (FLAGS_geometry) { - for (int i = 0; i < result->inspection_result.size(); i++) { - switch (result->inspection_result[i]) { - case isaac_msgs::InspectionResult::PIC_ACQUIRED: - std::cout << "Pic " << i <<" was processed " << std::endl; break; - } - } - } + s += "[SUCCESS] "; break; case ff_util::FreeFlyerActionState::Enum::PREEMPTED: - std::cout << "[PREEMPT] "; break; + s += "[PREEMPT] "; break; case ff_util::FreeFlyerActionState::Enum::ABORTED: - std::cout << "[ABORTED] "; break; + s += "[ABORTED] "; break; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_CONNECT: - std::cout << "Action timed out on connect"; goto teardown; + std::cout << "\nResult: Action timed out on connect"; goto teardown; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_ACTIVE: - std::cout << "Action timed out on active"; goto teardown; + std::cout << "\nResult: Action timed out on active"; goto teardown; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_RESPONSE: - std::cout << "Action timed out on response"; goto teardown; + std::cout << "\nResult: Action timed out on response"; goto teardown; case 
ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_DEADLINE: - std::cout << "Action timed out on deadline"; goto teardown; + std::cout << "\nResult: Action timed out on deadline"; goto teardown; + } + // Check that result var is valid + if (result != nullptr) { + // If we get there then we have some response data + s += result->fsm_result + " (Code " + std::to_string(result->response) + ")"; } + // Limit line to the maximum amout of characters + if (s.size() < 71) s.append(71 - s.size(), ' '); // In the case we continue if (result->fsm_result != "Inspection Over") { - std::cout << " | Input: " << std::flush; + s += "|Input: "; + std::cout << s << std::flush; return; } + std::cout << s << std::flush; + if (FLAGS_anomaly) { + for (int i = 0; i < result->anomaly_result.size(); i++) { + std::cout << "\n\tVent " << i <<" is " << result->anomaly_result[i].classifier_result; + } + } + + if (FLAGS_geometry) { + for (int i = 0; i < result->inspection_result.size(); i++) { + if (result->inspection_result[i] == isaac_msgs::InspectionResult::PIC_ACQUIRED) { + std::cout << "Pic " << i <<" was processed " << std::endl; break; + } + } + } teardown: std::cout << std::endl; @@ -274,31 +280,38 @@ void GetInput(ff_util::FreeFlyerActionClient *clie while (!stopflag_ && ros::ok()) { std::string line, val; std::getline(std::cin, line); - std::string clear_line; - clear_line.append(100, ' '); + std::string s; try { switch (std::stoi(line)) { case 0: - std::cout << "\r Exiting" << clear_line << std::endl; + s = "\r Input: " + line + ") Exiting"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; stopflag_ = true; break; case 1: FLAGS_pause = true; SendGoal(client); - std::cout << "\r Pausing" << clear_line << std::flush; + s = "\r Input: " + line + ") Pausing"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; break; case 2: FLAGS_pause = false; FLAGS_resume = true; SendGoal(client); - std::cout << "\r Resuming" << clear_line << 
std::endl; + s = "\r Input: " + line + ") Resuming"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; break; case 3: FLAGS_pause = false; FLAGS_resume = false; FLAGS_repeat = true; SendGoal(client); - std::cout << "\r Pausing and repeating pose (needs resume)" << clear_line << std::flush; + s = "\r Input: " + line + ") Pausing and repeating pose (needs resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; break; case 4: FLAGS_pause = false; @@ -306,7 +319,9 @@ void GetInput(ff_util::FreeFlyerActionClient *clie FLAGS_repeat = false; FLAGS_skip = true; SendGoal(client); - std::cout << "\r Pausing and skipping pose (needs resume)" << clear_line << std::flush; + s = "\r Input: " + line + ") Pausing and skipping pose (needs resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; break; case 5: FLAGS_pause = false; @@ -315,13 +330,22 @@ void GetInput(ff_util::FreeFlyerActionClient *clie FLAGS_skip = false; FLAGS_save = true; SendGoal(client); - std::cout << "\r Pausing and saving (needs resume)" << clear_line << std::flush; + s = "\r Input: " + line + ") Pausing and saving (needs resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; break; default: - std::cout << "\r Invalid option" << clear_line << std::endl; + s = "\r Input: " + line + ") Invalid option"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; } } catch (const std::invalid_argument&) { - continue; } + if (line != "") { + s = "\r Input: " + line + ") Invalid option"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; + } + } } return; } From 5a0530a5e2c66535a4b785b793bc3ef355ee7776 Mon Sep 17 00:00:00 2001 From: Marina Moreira Date: Wed, 9 Feb 2022 16:04:50 -0800 Subject: [PATCH 38/40] pausing before exit --- astrobee/behaviors/inspection/tools/inspection_tool.cc | 2 ++ 1 file changed, 2 insertions(+) 
diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index 53f8716c..9b45b29c 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -284,6 +284,8 @@ void GetInput(ff_util::FreeFlyerActionClient *clie try { switch (std::stoi(line)) { case 0: + FLAGS_pause = true; + SendGoal(client); s = "\r Input: " + line + ") Exiting"; if (s.size() < 80) s.append(80 - s.size(), ' '); std::cout << s << std::endl; From 44255c1f534c58dae870c9ca28f26df938366d82 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Fri, 11 Feb 2022 12:40:56 -0800 Subject: [PATCH 39/40] making panorama zigzag along longitude (#37) --- .../behaviors/inspection/src/inspection.cc | 59 +++++++++++++++---- 1 file changed, 47 insertions(+), 12 deletions(-) diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc index 88c3be27..a29da70e 100644 --- a/astrobee/behaviors/inspection/src/inspection.cc +++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -562,19 +562,54 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, if (pan_max_ - pan_min_ >= 2*PI) pan_max_-= 2 * EPS; // Go through all the points // Generate all the pan/tilt values - for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { - for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { - ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); - panorama_rotation.setRPY(0, tilt, pan); - panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; - point.orientation.x = panorama_rotation.x(); - point.orientation.y = panorama_rotation.y(); - point.orientation.z = panorama_rotation.z(); - point.orientation.w = panorama_rotation.w(); - panorama_relative.poses.push_back(point); - if ((tilt < -PI/2 + EPS && 
tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) - break; + int i = 0; + bool top_view = false; + bool bottom_view = false; + for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { + // zig zag through views + if (i%2 == 0) { + for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { + // Avoid taking multiple pictures at top and bottom + if (tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) { + if (bottom_view) {continue;} else {bottom_view = true;} + } + if (tilt < PI/2 + EPS && tilt > PI/2 - EPS) { + if (top_view) {continue;} else {top_view = true;} + } + + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + } + } else { + for (double tilt = tilt_max_; tilt >= tilt_min_ - EPS; tilt -= k_tilt) { + // Avoid taking multiple pictures at top and bottom + if (tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) { + if (bottom_view) {continue;} else {bottom_view = true;} + } + if (tilt < PI/2 + EPS && tilt > PI/2 - EPS) { + if (top_view) {continue;} else {top_view = true;} + } + + + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + // if ((tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) + // break; + } } + i++; } // Go through 
all the panorama points From 3c96824927d3e6d1ff90daa4508555a9ea6971cb Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Mon, 14 Feb 2022 10:32:31 -0800 Subject: [PATCH 40/40] Release 0.2.4 (#38) * bump up version * making debian gen easier * check if porcelain * fix launch file for robot launch * make possible to write inspections plans in the robot --- INSTALL.md | 12 ++++++------ RELEASE.md | 6 ++++++ debian/changelog | 10 +++++++++- debian/isaac0.postinst | 2 ++ debian/rules | 16 +++++++--------- isaac.doxyfile | 2 +- isaac/launch/isaac_astrobee.launch | 6 ++++-- scripts/build/build_debian.sh | 12 ++++++------ scripts/build/isaac_cross.cmake | 7 +++++-- 9 files changed, 46 insertions(+), 27 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index cbd6e943..d8561607 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -91,17 +91,17 @@ Cross-compiling isaac --------- -To cross-compile ISAAC, one must first cross compile the astobee code using the NASA_INSTALL instructions. +To cross-compile ISAAC, one must first cross compile the astobee code using the NASA_INSTALL instructions. Note that ASTROBEE_WS must be defined. A new catkin profile should be made to retain the configurations and easily switch between normal build. 
catkin profile add cross catkin profile set cross - catkin config --extend $ARMHF_CHROOT_DIR/opt/astrobee \ - --build-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/build \ - --install-space $ARMHF_CHROOT_DIR/opt/isaac \ - --devel-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/devel \ - --log-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/logs \ + catkin config --extend $ASTROBEE_WS/armhf/opt/astrobee \ + --build-space armhf/build \ + --install-space armhf/opt/isaac \ + --devel-space armhf/devel \ + --log-space armhf/logs \ --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ --cmake-args -DCMAKE_TOOLCHAIN_FILE=$ISAAC_WS/src/scripts/build/isaac_cross.cmake \ diff --git a/RELEASE.md b/RELEASE.md index 7e628ffd..55540d42 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,5 +1,11 @@ # Releases +## Release 0.2.4 + + * Inspection tool improvements + * Better CI + * Small bug fixes + ## Release 0.2.3 * Panorama picture taking mode supported diff --git a/debian/changelog b/debian/changelog index 1ddfd3e3..c7b1fd46 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,12 @@ -isaac (0.2.3) UNRELEASED; urgency=medium +isaac (0.2.4) testing; urgency=medium + + * Inspection tool improvements + * Better CI + * Small bug fixes + + -- Marina Moreira Fri, 11 Feb 2022 13:57:38 -0800 + +isaac (0.2.3) testing; urgency=medium * Panorama picture taking mode supported * Cargo transport simulation support diff --git a/debian/isaac0.postinst b/debian/isaac0.postinst index d0671567..2233df9d 100755 --- a/debian/isaac0.postinst +++ b/debian/isaac0.postinst @@ -2,7 +2,9 @@ if [ "$1" = "configure" ]; then chmod -R g+rwx /opt/isaac/env_wrapper.sh + chmod -R g+rwx /opt/isaac/share/inspection/resources if [ $(getent group users) ]; then chgrp -R users /opt/isaac/env_wrapper.sh + chgrp -R users /opt/isaac/share/inspection/resources fi fi diff --git a/debian/rules b/debian/rules index 74a13876..8a19d854 100755 --- 
a/debian/rules +++ b/debian/rules @@ -41,26 +41,24 @@ override_dh_auto_configure: catkin profile add debian --force && \ catkin profile set debian && \ catkin config \ - --extend $(ARMHF_CHROOT_DIR)/opt/astrobee \ - --build-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/build \ - --install-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/opt/isaac \ - --devel-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/devel \ - --log-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/logs \ + --extend $(ASTROBEE_WS)/armhf/opt/astrobee \ + --build-space debian/build \ + --install-space src/debian/tmp/opt/isaac \ + --devel-space debian/devel \ + --log-space debian/logs \ --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ --cmake-args \ -DCMAKE_TOOLCHAIN_FILE=$(CMAKE_TOOLCHAIN_FILE) \ -DARMHF_CHROOT_DIR=$(ARMHF_CHROOT_DIR) \ - -DARMHF_TOOLCHAIN=$(ARMHF_TOOLCHAIN) && \ - catkin clean -y --force + -DARMHF_TOOLCHAIN=$(ARMHF_TOOLCHAIN) override_dh_auto_build: ; # the install command is already invoked in the build override_dh_auto_install: cd .. && \ - catkin build && \ - rsync --remove-source-files -azh $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp src/debian + catkin build # we don't test override_dh_auto_test: ; diff --git a/isaac.doxyfile b/isaac.doxyfile index 7634df31..ad6b0fc5 100644 --- a/isaac.doxyfile +++ b/isaac.doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "ISAAC" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.2.3 +PROJECT_NUMBER = 0.2.4 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/isaac/launch/isaac_astrobee.launch b/isaac/launch/isaac_astrobee.launch index 5f41d84b..c03b154c 100644 --- a/isaac/launch/isaac_astrobee.launch +++ b/isaac/launch/isaac_astrobee.launch @@ -71,10 +71,12 @@ name="ROSCONSOLE_CONFIG_FILE" value="$(find astrobee)/resources/logging.config"/> + - - + diff --git a/scripts/build/build_debian.sh b/scripts/build/build_debian.sh index 26ead5dc..d4607783 100755 --- a/scripts/build/build_debian.sh +++ b/scripts/build/build_debian.sh @@ -20,11 +20,11 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -# if [ -n "$(git status --porcelain)" ]; then -# echo "You should not build Debians for a dirty source tree!" -# echo "Make sure all your changes are committed AND pushed to the server..." -# exit -1 -# fi +if [ -n "$(git status --porcelain)" ]; then + echo "You should not build Debians for a dirty source tree!" + echo "Make sure all your changes are committed AND pushed to the server..." + exit -1 +fi EXTRA_FLAGS="-b -a armhf" if [[ $* == *--config* ]]; then @@ -33,5 +33,5 @@ fi pushd $DIR/../.. export CMAKE_TOOLCHAIN_FILE=${DIR}/isaac_cross.cmake -DEB_BUILD_OPTIONS="parallel=8" debuild -e ARMHF_CHROOT_DIR -e ARMHF_TOOLCHAIN -e CMAKE_TOOLCHAIN_FILE -e CMAKE_PREFIX_PATH -us -uc $EXTRA_FLAGS +DEB_BUILD_OPTIONS="parallel=8" debuild -e ASTROBEE_WS -e ARMHF_CHROOT_DIR -e ARMHF_TOOLCHAIN -e CMAKE_TOOLCHAIN_FILE -e CMAKE_PREFIX_PATH -us -uc $EXTRA_FLAGS popd > /dev/null diff --git a/scripts/build/isaac_cross.cmake b/scripts/build/isaac_cross.cmake index c24b9874..d96a2087 100644 --- a/scripts/build/isaac_cross.cmake +++ b/scripts/build/isaac_cross.cmake @@ -52,8 +52,10 @@ SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) # If this is not done, the resulting rpath will not look for them in the chroot # environment. 
# Also, RTI DDS is included once for the libraries and once for the headers. +execute_process(COMMAND catkin locate -i OUTPUT_VARIABLE CATKIN_INSTALL_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) +execute_process(COMMAND catkin locate -d OUTPUT_VARIABLE CATKIN_DEVEL_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) SET(CMAKE_FIND_ROOT_PATH - ${ARM_CHROOT_DIR}) + ${ARM_CHROOT_DIR} ${CATKIN_INSTALL_PATH} ${CATKIN_DEVEL_PATH} $ENV{ASTROBEE_WS}/armhf/opt/astrobee) SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) @@ -68,7 +70,8 @@ IF( DEFINED EXTRA_ROOT_PATH ) SET(CMAKE_FIND_ROOT_PATH ${EXTRA_ROOT_PATH} ${CMAKE_FIND_ROOT_PATH}) ENDIF( DEFINED EXTRA_ROOT_PATH ) -SET(catkin2_DIR ${CMAKE_SOURCE_DIR}/cmake) +execute_process(COMMAND catkin locate -s OUTPUT_VARIABLE CATKIN_SRC_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) +SET(catkin2_DIR ${CATKIN_SRC_PATH}/cmake) # needed for gflag to compile... SET( THREADS_PTHREAD_ARG