From c6cc7b5c0707a9b23b5fc4376f0a062052992bc6 Mon Sep 17 00:00:00 2001 From: Marina Moreira <67443181+marinagmoreira@users.noreply.github.com> Date: Thu, 16 Dec 2021 11:51:10 -0800 Subject: [PATCH] Release 0.2.3 (#10) * fixing CI actions (#1) * dense_mapper: Add reliability weights and happly.h (#3) * Panorama parameters (#4) * Panorama surveys (#6) * Readme update (#7) * support texturing haz cam and color images everywhere in the geometry mapper (#8) Co-authored-by: Marina Moreira Co-authored-by: Oleg Alexandrov Co-authored-by: Trey Smith Co-authored-by: Brian Coltin --- .github/workflows/ci_pr.yml | 66 +- .github/workflows/ci_push.yml | 78 +- .github/workflows/ci_release.yml | 72 +- DEMO_INSTALL.md | 15 +- INSTALL.md | 27 +- README.md | 84 +- RELEASE.md | 17 +- astrobee/behaviors/cargo/CMakeLists.txt | 10 +- astrobee/behaviors/cargo/readme.md | 2 +- astrobee/behaviors/cargo/src/cargo_node.cc | 4 +- astrobee/behaviors/inspection/CMakeLists.txt | 1 - .../include/inspection/inspection.h | 20 +- astrobee/behaviors/inspection/readme.md | 45 +- .../inspection/resources/panorama_granite.txt | 2 + .../inspection/resources/panorama_jpm.txt | 2 - .../inspection/resources/scicam_panorama.txt | 2 + .../resources/soundsee_backround_noise.txt | 2 + .../resources/soundsee_panorama.txt | 2 + .../inspection/scripts/pano_orientations.py | 131 +- .../behaviors/inspection/src/inspection.cc | 231 +- .../inspection/src/inspection_node.cc | 63 +- .../inspection/tools/inspection_tool.cc | 31 +- astrobee/behaviors/readme.md | 3 - astrobee/hardware/readme.md | 8 +- astrobee/hardware/wifi/readme.md | 8 +- astrobee/readme.md | 10 +- .../acoustics_cam/nodes/acoustics_cam | 28 +- astrobee/simulation/acoustics_cam/readme.md | 12 +- .../acoustics_cam/src/generate_pure_tones.py | 30 +- .../{gazebo => isaac_gazebo}/CMakeLists.txt | 0 .../launch/spawn_isaac.launch | 0 .../launch/spawn_object.launch | 0 .../launch/start_simulation.launch | 0 .../{gazebo => isaac_gazebo}/package.xml | 0 .../{gazebo 
=> isaac_gazebo}/readme.md | 4 +- .../gazebo_model_plugin_cargo.cc | 0 .../gazebo_sensor_plugin_RFID_receiver.cc | 0 .../gazebo_sensor_plugin_RFID_tag.cc | 0 .../gazebo_sensor_plugin_air_quality.cc | 0 .../gazebo_sensor_plugin_heat_cam.cc | 0 .../gazebo_sensor_plugin_wifi_receiver.cc | 0 .../gazebo_sensor_plugin_wifi_transmitter.cc | 0 .../worlds/granite.world | 0 .../{gazebo => isaac_gazebo}/worlds/iss.world | 0 .../{gazebo => isaac_gazebo}/worlds/r2.world | 0 cmake/catkin2Config.cmake | 18 +- debian/changelog | 19 +- dense_map/geometry_mapper/CMakeLists.txt | 8 + .../include/dense_map_ros_utils.h | 7 + .../geometry_mapper/include/dense_map_utils.h | 14 +- dense_map/geometry_mapper/include/happly.h | 2013 +++++++++++++++ dense_map/geometry_mapper/readme.md | 55 +- .../src/dense_map_ros_utils.cc | 108 +- .../geometry_mapper/src/dense_map_utils.cc | 34 +- .../geometry_mapper/src/interest_point.cc | 23 +- .../geometry_mapper/tools/camera_refiner.cc | 220 +- .../geometry_mapper/tools/camera_refiner2.cc | 2151 +++++++++++++++++ .../tools/cameras_to_texrecon.py | 114 +- .../tools/colorize_bag_images.cc | 153 ++ .../tools/extract_pc_from_bag.cc | 11 +- .../geometry_mapper/tools/geometry_mapper.cc | 1289 +++++----- .../geometry_mapper/tools/geometry_mapper.py | 942 +++++--- .../geometry_mapper/tools/image_picker.cc | 4 +- dense_map/readme.md | 2 - dense_map/volumetric_mapper/readme.md | 12 +- img_analysis/readme.md | 18 +- img_analysis/resources/analyse_img.py | 69 - img_analysis/resources/vent_cnn.py | 156 -- img_analysis/scripts/analyse_img.py | 52 +- img_analysis/scripts/train_cnn_vent.py | 97 +- isaac.doxyfile | 2 +- isaac/CMakeLists.txt | 2 +- isaac/Subsystems.md | 2 +- isaac/config/behaviors/inspection.config | 112 +- isaac/launch/robot/ILP.launch | 10 - isaac/launch/sim.launch | 3 + licenses.csv | 3 +- scripts/docker/build.sh | 2 + scripts/git/cpplint_repo.py | 6 +- scripts/git/pre-commit.linter_cpp | 1 + scripts/git/pre-commit.linter_python | 32 +- 
scripts/setup/build_install_dependencies.sh | 6 +- scripts/update_release.sh | 19 + 83 files changed, 6862 insertions(+), 1937 deletions(-) create mode 100644 astrobee/behaviors/inspection/resources/panorama_granite.txt delete mode 100644 astrobee/behaviors/inspection/resources/panorama_jpm.txt create mode 100644 astrobee/behaviors/inspection/resources/scicam_panorama.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt create mode 100644 astrobee/behaviors/inspection/resources/soundsee_panorama.txt mode change 100755 => 100644 astrobee/behaviors/inspection/src/inspection.cc mode change 100755 => 100644 astrobee/behaviors/inspection/src/inspection_node.cc rename astrobee/simulation/{gazebo => isaac_gazebo}/CMakeLists.txt (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/spawn_isaac.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/spawn_object.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/launch/start_simulation.launch (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/package.xml (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/readme.md (98%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc (100%) rename astrobee/simulation/{gazebo => 
isaac_gazebo}/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/granite.world (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/iss.world (100%) rename astrobee/simulation/{gazebo => isaac_gazebo}/worlds/r2.world (100%) create mode 100644 dense_map/geometry_mapper/include/happly.h create mode 100644 dense_map/geometry_mapper/tools/camera_refiner2.cc create mode 100644 dense_map/geometry_mapper/tools/colorize_bag_images.cc delete mode 100644 img_analysis/resources/analyse_img.py delete mode 100644 img_analysis/resources/vent_cnn.py create mode 100755 scripts/update_release.sh diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index dbb37e55..5463516b 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -14,21 +14,21 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu16.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' -t isaac:latest-ubuntu16.04 build-bionic: @@ -39,21 +39,21 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu18.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 -t isaac:latest-ubuntu18.04 build-focal: @@ -64,19 +64,19 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu20.04 + run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 -t isaac:latest-ubuntu20.04 diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 85f1e14e..9ba53e2c 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -14,22 +14,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu16.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04 - name: Log in to registry @@ -37,7 +37,7 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04; fi; build-bionic: @@ -48,22 +48,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu18.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04 - name: Log in to registry @@ -71,7 +71,7 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04; fi; build-focal: @@ -82,22 +82,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu20.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04 - name: Log in to registry @@ -105,5 +105,5 @@ jobs: - name: Push Docker image run: | - if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04; fi; diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 462366c8..d47ab4a8 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -2,7 +2,7 @@ name: Build, test and push packages CI on: push: - branches: [ 'release' ] + branches: [ 'master' ] jobs: @@ -14,22 +14,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=ghcr.io/nasa \ + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=16.04 \ - --build-arg ROS_VERSION=kinetic \ - --build-arg PYTHON='' \ - --build-arg REMOTE=isaac \ + run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu16.04 - name: Log in to registry @@ -51,22 +51,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu18.04 + run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=18.04 \ - --build-arg ROS_VERSION=melodic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=isaac \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu18.04 - name: Log in to registry @@ -88,22 +88,22 @@ jobs: - uses: actions/checkout@v2 - name: Checkout submodule - run: git submodule update --init --depth 1 description/media + run: git submodule update --init --depth 1 isaac_msgs - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=ghcr.io/nasa \ - -t isaac/isaac:astrobee-ubuntu20.04 + run: docker build . 
-f ./scripts/docker/isaac_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac.Dockerfile \ - --build-arg UBUNTU_VERSION=20.04 \ - --build-arg ROS_VERSION=noetic \ - --build-arg PYTHON=3 \ - --build-arg REMOTE=isaac \ + run: docker build . -f ./scripts/docker/isaac.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=isaac -t isaac/isaac:latest-ubuntu20.04 - name: Log in to registry diff --git a/DEMO_INSTALL.md b/DEMO_INSTALL.md index 3c17eb62..5c4e0439 100644 --- a/DEMO_INSTALL.md +++ b/DEMO_INSTALL.md @@ -1,8 +1,10 @@ -# Demo docker install +Demo docker install +===== Instructions for the full demo install -### Check out +Check out +--------- Run: @@ -15,7 +17,8 @@ Run: (You can also modify the checkout location `~/ws` if you want.) -### Install dependencies +Install dependencies +--------- Install docker tools: @@ -23,11 +26,13 @@ Install docker tools: Install nvidia-docker by following (the directions here)[https://github.com/NVIDIA/nvidia-docker]. -### Build +Build +--------- Run `scripts/docker/build.sh` to build the docker images for the demo. -### Install +Install +--------- Run `scripts/docker/run.sh` to run the demo. Open `http://127.0.0.1:8080` in a web browser to see what is happening. Use `docker ps` to see the docker containers and use `docker exec -it container_name /bin/bash` to get a shell in one. 
diff --git a/INSTALL.md b/INSTALL.md index 5157e7d9..d51722b7 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,6 +1,8 @@ -# Native Install +Native Install +===== -### Usage instructions for non-NASA and NASA users +Usage instructions for non-NASA and NASA users +--------- Install the 64-bit version of [Ubuntu 16.04, 18.04 or 20.04](http://releases.ubuntu.com/) on a host machine, and make sure that you can checkout and build code. @@ -16,11 +18,13 @@ any other operating system or Ubuntu versions.* *Note: Please ensure you install the 64-bit version of Ubuntu. We do not support running ISAAC Software on 32-bit systems.* -### Machine setup +Machine setup +--------- The `isaac` repo depends on some `astrobee` packages, therefore, `astrobee` needs to be installed beforehand. -### Checkout the project source code +Checkout the project source code +--------- At this point you need to decide where you'd like to put the ISAAC workspace and code (`ISAAC_WS`) on your machine (add this to your .bashrc for persistency): @@ -37,7 +41,8 @@ Checkout the submodule: git submodule update --init --recursive -### Dependencies +Dependencies +--------- Next, install all required dependencies: *Note: `root` access is necessary to install the packages below* @@ -52,13 +57,15 @@ Next, install all required dependencies: rosdep update popd -### Configuring the build +Configuring the build +--------- By default, the catkin uses the following paths: - devel build path: `$ISAAC_WS/devel` - install build path: `$ISAAC_WS/install` -### Building the code +Building the code +--------- Source your astrobee build environment, for example as: @@ -80,7 +87,8 @@ The command 'source devel/setup.bash' is very important to make sure that everyt If you are working in simulation only, then you're all done! The next steps are only for running ISAAC onboard Astrobee. 
-### Cross-compiling isaac +Cross-compiling isaac +--------- To cross-compile ISAAC, one must first cross compile the astobee code using the NASA_INSTALL instructions. @@ -99,7 +107,8 @@ A new catkin profile should be made to retain the configurations and easily swit --cmake-args -DCMAKE_TOOLCHAIN_FILE=$ISAAC_WS/src/scripts/build/isaac_cross.cmake \ -DARMHF_CHROOT_DIR=$ARMHF_CHROOT_DIR -### Build ISAAC debian +Build ISAAC debian +--------- To build a debian you must first confirm that cross-compiling is functional. Once it is: diff --git a/README.md b/README.md index bcb34ab2..3383cae3 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,53 @@ -# ISAAC Software - -### About - -The ISAAC project actively develops in a variety of repos hosted at -Ames and JSC, as described on the (collaboration tools wiki -page)[https://babelfish.arc.nasa.gov/confluence/display/astrosoft/Astrosoft+Collaboration+Tools] +ISAAC (Integrated System for Autonomous and Adaptive Caretaking) +===== + +[![GitHub license](https://img.shields.io/github/license/nasa/isaac)](https://github.com/nasa/isaac/blob/master/LICENSE) +[![Build, test and push packages CI](https://github.com/nasa/isaac/actions/workflows/ci_push.yml/badge.svg)](https://github.com/nasa/isaac/actions/workflows/ci_push.yml) + +The ISAAC project has three main technical thrusts: + +1. **Integrated data**: The current state of the art is that data from sensors associated with different facility + subsystems (Structures, GN&C, and ECLSS, etc.) remain siloed. ISAAC technology unifies facility data and models with + autonomous robotics, linking data streams from facility subsystems, sensor networks, and robots, as well as linking 3D + geometry and sensor data map layers, and detecting changes and anomalies. +2. **Integrated control interface**: Current state of the art manages facilities with largely independent interface tools + for different subsystems. 
Interfaces have different heritage, design assumptions and operator interface styles. Subsystem + interactions are hard to analyze due to poor connectivity between separate tools. ISAAC technology combines interface + tools for facility subsystems and autonomous robots; improves system-level situation awareness, situation understanding, + and operator productivity; enables linking and embedding between tools to improve subsystem interaction analysis; and + supports the entire activity life cycle from planning through execution and analysis. +3. **Coordinated execution**: The current state of the art for executing facility activities that require coordination + between subsystems is either manual commanding (operator tracks dependencies between subsystems) or simple sequencing + that can be brittle to even minor contingencies during execution. ISAAC technology models dependencies, uses automated + planning to translate a high-level task definition to a plan that can work given the current system state (e.g. include + actions to open hatches so that a robot can move where needed), and leverages ISAAC’s integrated data technology to + watch for execution contingencies and trigger replanning as needed. This technology will reduce the time to effect + changes on the system during critical faults and emergencies. This `isaac` repo serves as a master for integrating an end-to-end -demo that draws on code from the other repos (as well as directly +demo that draws on code from the other repos as well as directly including a significant amount of the ISAAC code, mostly relating to -the Astrobee robot). +the [Astrobee robot](https://github.com/nasa/astrobee). This repository includes: + +- [Astrobee software](https://nasa.github.io/isaac/html/astrobee.html) for added behaviors such as inpection and cargo transport as well as new sensor utilization. 
+- [Dense mapping](https://nasa.github.io/isaac/html/geometric_streaming_mapper.html) to create a textured 3D map +- [Volumetric mapping](https://nasa.github.io/isaac/html/volumetric_mapper.html) to map volumetric signals, such as WiFi. +- [Image analysis](https://nasa.github.io/isaac/html/ano.html) module to train a neural network to detect anomalies + +You may also be interested in the separate repository for the [ISAAC User Interface](https://github.com/nasa/isaac_user_interface), +which enables monitoring of multiple robots through a web browser. + +System requirements +--------- + +The `isaac` repo depends on the `astrobee` repo, therefore it inherits +the same system requirements. You must use Ubuntu 16.04 to 20.04 64-bit. When +running in simulation, certain Gazebo plugins require appropriate +graphics drivers. See INSTALL.md in that repository for more +information. + +Usage +--------- There are two main ways to install and run `isaac`: @@ -25,33 +63,27 @@ There are two main ways to install and run `isaac`: in your native OS, and you don't need to install for development prior to installing for demo. -### System requirements - -The `isaac` repo depends on the `astrobee` repo, therefore it inherits -the same system requirements. You must use Ubuntu 16.04 to 20.04 64-bit. When -running in simulation, certain Gazebo plugins require appropriate -graphics drivers. See INSTALL.md in that repository for more -information. - -### Usage +[Instructions on installing and using the ISAAC Software](https://nasa.github.io/isaac/html/md_INSTALL.html). For running the [docker demos](https://nasa.github.io/isaac/html/md_DEMO_INSTALL.html) -[Instructions on installing and using the ISAAC Software](INSTALL.md). [For running -the demos](DEMO_INSTALL.md) +Documentation +--------- -### Documentation +[The documentation is auto-generated from the contents of this repository.](https://nasa.github.io/isaac/documentation.html) -There are Doxygen comments in the header files. 
To compile (make sure you have the latest doxygen installed): +To compile the documentation locally (make sure you have the latest doxygen installed): doxygen isaac.doxyfile -### Contributing +Contributing +--------- The ISAAC Software is open source, and we welcome contributions from the public. Please submit pull requests to the `develop` branch. For us to merge any pull -requests, we must request that contributors sign and submit a Contributor License -Agreement due to NASA legal requirements. Thank you for your understanding. +requests, we must request that contributors sign and submit either an [Individual Contributor License Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Individual%20CLA.pdf) or a [Corporate Contributor License +Agreement](https://github.com/nasa/isaac/blob/94996bc1a20fa090336e67b3db5c10a9bb30f0f7/doc/cla/ISAAC_Corporate%20CLA.pdf) due to NASA legal requirements. Thank you for your understanding. -### License +License +--------- Copyright (c) 2021, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. diff --git a/RELEASE.md b/RELEASE.md index bda6f065..7e628ffd 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,9 +1,22 @@ # Releases +## Release 0.2.3 + + * Panorama picture taking mode supported + * Cargo transport simulation support + +## Release 0.2.2 + + * Compatible wit 0.14.x versions, made for ISAAC 3 activity + +## Release 0.2.1 + + * Compatible wit 0.14.x versions, made for ISAAC 2 activity + ## Release 0.2.0 - * Ready for ISAAC 1-3 activities + * Compatible wit 0.14.x versions, made for ISAAC 1 activity ## Release 0.1.0 - * Demo June 2020 + * Initial release. Demo June 2020. 
diff --git a/astrobee/behaviors/cargo/CMakeLists.txt b/astrobee/behaviors/cargo/CMakeLists.txt index d2c555cf..f5d682c8 100644 --- a/astrobee/behaviors/cargo/CMakeLists.txt +++ b/astrobee/behaviors/cargo/CMakeLists.txt @@ -23,9 +23,8 @@ project(cargo) add_compile_options(-std=c++14) ## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS +SET(catkin2_DIR "${CMAKE_SOURCE_DIR}/../../../cmake") +find_package(catkin2 REQUIRED COMPONENTS roscpp std_msgs nodelet @@ -98,11 +97,6 @@ install(DIRECTORY launch/ DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/launch PATTERN ".svn" EXCLUDE) -# Mark resources files for installation -install(DIRECTORY resources/ - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/resources - PATTERN ".svn" EXCLUDE) - # Required plugin descriptor file for nodelet install(FILES nodelet_plugins.xml DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} diff --git a/astrobee/behaviors/cargo/readme.md b/astrobee/behaviors/cargo/readme.md index 93016ffa..4bae028f 100644 --- a/astrobee/behaviors/cargo/readme.md +++ b/astrobee/behaviors/cargo/readme.md @@ -2,7 +2,7 @@ This directory provides the cargo_tool -## Using the cargo tool +### Using the cargo tool This tool is used to initiate pickup and drop cargo actions. 
diff --git a/astrobee/behaviors/cargo/src/cargo_node.cc b/astrobee/behaviors/cargo/src/cargo_node.cc index 105c4a17..960f608d 100755 --- a/astrobee/behaviors/cargo/src/cargo_node.cc +++ b/astrobee/behaviors/cargo/src/cargo_node.cc @@ -95,7 +95,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { GOAL_UNPAUSE = (1<<6), // Resume an existing goal MOTION_SUCCESS = (1<<7), // Mobility motion action success MOTION_FAILED = (1<<8), // Mobility motion action problem - ARM_SUCCESS = (1<<9), // Arm motion action success + ARM_SUCCESS = (1<<9), // Arm motion action success ARM_FAILED = (1<<10), // Arm motion action problem DETECT_SUCCESS = (1<<11), // Detection AR tag success DETECT_FAILED = (1<<12), // Detection AR tag failed @@ -442,7 +442,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { } void GroundConnectedCallback() { - ROS_ERROR_STREAM("GroundConnectedCallback()"); + NODELET_DEBUG_STREAM("GroundConnectedCallback()"); if (!client_d_.IsConnected()) return; // Detection action ground_active_ = true; } diff --git a/astrobee/behaviors/inspection/CMakeLists.txt b/astrobee/behaviors/inspection/CMakeLists.txt index e77fd7f5..c5a10e8d 100644 --- a/astrobee/behaviors/inspection/CMakeLists.txt +++ b/astrobee/behaviors/inspection/CMakeLists.txt @@ -23,7 +23,6 @@ project(inspection) add_compile_options(-std=c++14) ## Find catkin macros and libraries -find_package(catkin REQUIRED) # defines catkin_DIR SET(catkin2_DIR "${CMAKE_SOURCE_DIR}/../../../cmake") find_package(catkin2 REQUIRED COMPONENTS roscpp diff --git a/astrobee/behaviors/inspection/include/inspection/inspection.h b/astrobee/behaviors/inspection/include/inspection/inspection.h index 766d05d0..f833bdec 100755 --- a/astrobee/behaviors/inspection/include/inspection/inspection.h +++ b/astrobee/behaviors/inspection/include/inspection/inspection.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -87,7 +88,9 @@ namespace inspection { class Inspection { public: // Constructor - 
Inspection(ros::NodeHandle* nh, ff_util::ConfigServer cfg); + Inspection(ros::NodeHandle* nh, ff_util::ConfigServer* cfg); + // Read parameters from config server + void ReadParam(); // Generate inspection segment bool GenSegment(geometry_msgs::Pose goal); // Remove head of segment if planing failed @@ -134,6 +137,10 @@ class Inspection { geometry_msgs::PoseArray points_; // Vector containing inspection poses + // Parameter clients + ff_util::ConfigServer *cfg_; + config_reader::ConfigReader cfg_cam_; + // Inspection parameters double opt_distance_; double dist_resolution_; @@ -143,8 +150,15 @@ class Inspection { double min_distance_; double horizontal_fov_; double aspect_ratio_; - double vent_size_x_; - double vent_size_y_; + double target_size_x_; + double target_size_y_; + + // Panorame parameters + double pan_min_; + double pan_max_; + double tilt_min_; + double tilt_max_; + double overlap_; // Publish Markers ros::Publisher pub_no_filter_; diff --git a/astrobee/behaviors/inspection/readme.md b/astrobee/behaviors/inspection/readme.md index 14d4902e..e280c7d6 100644 --- a/astrobee/behaviors/inspection/readme.md +++ b/astrobee/behaviors/inspection/readme.md @@ -1,8 +1,9 @@ -\page inspection Inspeiction Behavior +\page inspection Inspection Behavior This directory provides two tools: inspection_tool and sci_cam_tool. -## Using the inspection tool +Using the inspection tool +--------- This tool is used to initiate inspection actions. To run the tool: @@ -10,45 +11,27 @@ This tool is used to initiate inspection actions. To run the tool: As of now, the actions available are: -### Pause +*pause*: Pauses the current action being performed, the goal is kept, so a resume command will resume the action. -Pauses the current action being performed, the goal is kept, so a resume command will resume the action. +*resume*: Resumes a paused or failed action (in the case where there is a motion fail or picture fail). It will restart on the "move to inspection pose" step. 
-### Resume +*repeat*: Resumes the survey repeating the last inspection pose. -Resumes a paused or failed action (in the case where there is a motion fail or picture fail). It will restart on the "move to inspection pose" step. +*skip*: Skips the surrent inspection pose, useful if a certain pose is unreachable. -### Repeat -Resumes the survey repeating the last inspection pose. +*save*: Saves the current goal, in the case flight software needs to be restarted. The current survey is saved in resources/current.txt. It can be loaded afterwards using +*anomaly*: Starts an anomaly inspection action. The robot will come up to a target, take a picture and run the picture through the image anomaly detector to classify it. -### Skip +*geometry*: Starts a geometry inspection, meaning that it will go to the commanded poses and take pictures to be processed by the geometry mapper. -Skips the surrent inspection pose, useful if a certain pose is unreachable. +*panorama*: it will do a panorama survey of all points specified +*volumetric*: This will perform a volumetric survey -### Save - -Saves the current goal, in the case flight software needs to be restarted. The current survey is saved in resources/current.txt. It can be loaded afterwards using - -### Anomaly - -Starts an anomaly inspection action. The robot will come up to a target, take a picture and run the picture through the image anomaly detector to classify it. - -### Geometry - -Starts a geometry inspection, meaning that it will go to the commanded poses and take pictures to be processed by the geometry mapper. - -### Panorama - -it will do a panorama survey of all points specified - -### Volumetric - -This will perform a volumetric survey - -## Using sci_cam_tool +Using sci_cam_tool +--------- This tool is used to control the sci cam plugin in the Astrobee simulator, more precisely the way it acquires pictures. 
To use it, perform the following steps: diff --git a/astrobee/behaviors/inspection/resources/panorama_granite.txt b/astrobee/behaviors/inspection/resources/panorama_granite.txt new file mode 100644 index 00000000..af8530ba --- /dev/null +++ b/astrobee/behaviors/inspection/resources/panorama_granite.txt @@ -0,0 +1,2 @@ +# Panorama +0 0 -0.772 0 0 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/panorama_jpm.txt b/astrobee/behaviors/inspection/resources/panorama_jpm.txt deleted file mode 100644 index c102286f..00000000 --- a/astrobee/behaviors/inspection/resources/panorama_jpm.txt +++ /dev/null @@ -1,2 +0,0 @@ -# Panorama -10.5 -8 5.0 0 0 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/scicam_panorama.txt b/astrobee/behaviors/inspection/resources/scicam_panorama.txt new file mode 100644 index 00000000..46e20d03 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/scicam_panorama.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.0 0 0 180 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt new file mode 100644 index 00000000..d3dd47a1 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_panorama.txt b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt new file mode 100644 index 00000000..d3dd47a1 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_panorama.txt @@ -0,0 +1,2 @@ +# Panorama +11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/scripts/pano_orientations.py b/astrobee/behaviors/inspection/scripts/pano_orientations.py index ea206094..36a57cb1 100755 --- a/astrobee/behaviors/inspection/scripts/pano_orientations.py +++ 
b/astrobee/behaviors/inspection/scripts/pano_orientations.py @@ -16,7 +16,6 @@ import numpy as np from scipy.spatial.transform import Rotation - EPS = 1e-5 ROLL, PITCH, YAW = 0, 1, 2 @@ -32,18 +31,26 @@ def toRPY(rot): def pano1D(rangeMin, rangeMax, fov, overlap): """ Returns image center coordinates such that images cover the range - @rangeMin .. @rangeMax with at least the specified @overlap. + @rangeMin .. @rangeMax with at least the specified @overlap. If one + image suffices, it will be centered between @rangeMin and @rangeMax (and + the edges of the image will beyond the range if it is smaller than @fov). + If more than one image is needed to cover the range, the boundary images + will cover exactly to the edges of the specified range and the images + will be evenly spaced. :param float rangeMin: Minimum angle of minimum image (degrees). :param float rangeMax: Maximum angle of maximum image (degrees). :param float fov: Field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion - of the image field of view (0 .. 1). :return: A vector of orientations of - image centers. + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :return: A vector of orientations of image centers. """ - assert rangeMin < rangeMax + assertLte(rangeMin, rangeMax, EPS) W = rangeMax - rangeMin + if W < fov: + # Special case: Only one image needed. Center it. 
+ return np.array([0.5 * (rangeMin + rangeMax)]) + # sufficient overlap criterion: stride <= fov * (1 - overlap) # (k - 1) * stride + fov = W # stride = (W - fov) / (k - 1) @@ -55,14 +62,18 @@ def pano1D(rangeMin, rangeMax, fov, overlap): if 1: # optional sanity checks + stride = (W - fov) / (k - 1) assertLte(stride, fov * (1 - overlap), EPS) # sufficient overlap - if k > 2: - stride1 = (W - fov) / (k - 2) - assertGte(stride1, fov * (1 - overlap), EPS) # k is minimized + # check if we have more images than necessary + if k == 1: + pass # obviously need at least one image elif k == 2: assertLte(fov, W, EPS) # k = 1 is not enough + else: + stride1 = (W - fov) / (k - 2) + assertGte(stride1, fov * (1 - overlap), EPS) # k is minimized minCenter = rangeMin + fov / 2 maxCenter = rangeMax - fov / 2 @@ -76,7 +87,7 @@ def pano1DCompletePan(fov, overlap): including at the wrap-around, and the image sequence is centered at pan = 0. :param float fov: Field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). :return: A vector of orientations of image centers. """ k = math.ceil(360 / (fov * (1 - overlap))) @@ -99,7 +110,7 @@ def panoOrientations(panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preT :param float tiltMax: Tilt angle of top edge of top row (degrees). :param float hFov: Horizontal field of view of each image (degrees). :param float vFov: Vertical field of view of each image (degrees). - :param float overlap: Overlap between consecutive images, as a proportion of the image field of view (0 .. 1). + :param float overlap: Minimum required overlap between consecutive images, as a proportion of the image field of view (0 .. 1). 
:param float preTilt: Offsets the (pan, tilt) = (0, 0) center in tilt (degrees) so as to efficiently capture panoramas centered near tilt = +/- 90. Example: When preTilt = -90, tilt = 0 is offset to point straight down. :return: (imageCenters, ncols, nrows). A list of orientations of image centers, the number of columns in the panorama, and the number of rows. """ @@ -160,34 +171,46 @@ def checkPano(pano, panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preTi imageCenters, ncols, nrows = pano topLeft = getEuler(imageCenters[0], preTilt) - p11 = getEuler(imageCenters[nrows * 1 + 1], preTilt) - bottomRight = getEuler(imageCenters[-1], preTilt) - panStride = p11[YAW] - topLeft[YAW] - tiltStride = -(p11[PITCH] - topLeft[PITCH]) + if ncols == 1: + assertLte(panMax - panMin, hFov, EPS) # one column is enough + else: + nextPan = getEuler(imageCenters[nrows], preTilt) + panStride = nextPan[YAW] - topLeft[YAW] + assertLte(panStride, hFov * (1 - overlap), EPS) # pan overlaps enough - assertLte(panStride, hFov * (1 - overlap), EPS) # pan overlaps enough - assertLte(tiltStride, vFov * (1 - overlap), EPS) # tilt overlaps enough + if nrows == 1: + assertLte(tiltMax - tiltMin, vFov, EPS) # one row is enough + else: + nextTilt = getEuler(imageCenters[1], preTilt) + tiltStride = -(nextTilt[PITCH] - topLeft[PITCH]) + assertLte(tiltStride, vFov * (1 - overlap), EPS) # tilt overlaps enough - # we should not be able to remove a column - if ncols > 2: - if panMin == -180 and panMax == 180: - panStride1 = 360 / (ncols - 1) - else: - panStride1 = ((panMax - panMin) - hFov) / (ncols - 2) - assertGte(panStride1, hFov * (1 - overlap), EPS) # ncols is minimized + # we shouldn't be able to remove a column + if ncols == 1: + pass # obviously can't remove single column elif ncols == 2: if panMin == -180 and panMax == 180: assertLte(hFov * (1 - overlap), 360) # one column is not enough else: assertLte(hFov, panMax - panMin, EPS) # one column is not enough + else: + if panMin == -180 and 
panMax == 180: + panStride1 = 360 / (ncols - 1) + else: + panStride1 = ((panMax - panMin) - hFov) / (ncols - 2) + assertGte(panStride1, hFov * (1 - overlap), EPS) # ncols is minimized - # we should not be able to remove a row - if nrows > 2: - tiltStride1 = ((tiltMax - tiltMin) - vFov) / (nrows - 2) - assertGte(tiltStride1, vFov * (1 - overlap), EPS) # nrows is minimized + # we shouldn't be able to remove a row + if nrows == 1: + pass # obviously can't remove single row elif nrows == 2: assertLte(vFov, tiltMax - tiltMin, EPS) # one row is not enough + else: + tiltStride1 = ((tiltMax - tiltMin) - vFov) / (nrows - 2) + assertGte(tiltStride1, vFov * (1 - overlap), EPS) # nrows is minimized + + bottomRight = getEuler(imageCenters[-1], preTilt) panCenterMin = topLeft[YAW] panCenterMax = bottomRight[YAW] @@ -196,14 +219,24 @@ def checkPano(pano, panMin, panMax, tiltMin, tiltMax, hFov, vFov, overlap, preTi midPan = np.mean((panCenterMin, panCenterMax)) assertEqual(midPan, 0, EPS) # centered else: - assertEqual(panCenterMin - hFov / 2, panMin, EPS) # covers to panMin - assertEqual(panCenterMax + hFov / 2, panMax, EPS) # covers to panMax + if ncols == 1: + assertEqual( + panCenterMin, 0.5 * (panMin + panMax), EPS + ) # single column is centered + else: + assertEqual(panCenterMin - hFov / 2, panMin, EPS) # covers to panMin + assertEqual(panCenterMax + hFov / 2, panMax, EPS) # covers to panMax tiltCenterMax = topLeft[PITCH] tiltCenterMin = bottomRight[PITCH] - assertEqual(tiltCenterMin - vFov / 2, tiltMin, EPS) # covers to tiltMin - assertEqual(tiltCenterMax + vFov / 2, tiltMax, EPS) # covers to tiltMax + if nrows == 1: + assertEqual( + tiltCenterMin, 0.5 * (tiltMin + tiltMax), EPS + ) # single row is centered + else: + assertEqual(tiltCenterMin - vFov / 2, tiltMin, EPS) # covers to tiltMin + assertEqual(tiltCenterMax + vFov / 2, tiltMax, EPS) # covers to tiltMax def testCase(label, config): @@ -317,3 +350,37 @@ def testCase(label, config): "preTilt": -90, }, ) + +# == TEST 
CASE 5 == +# Test special-case logic for 1-row panorama. + +testCase( + "1-row panorama", + { + "panMin": -60, + "panMax": 60, + "tiltMin": 10, + "tiltMax": 20, + "hFov": 60, + "vFov": 60, + "overlap": 0.3, + "preTilt": 0, + }, +) + +# == TEST CASE 6 == +# Test special-case logic for 1-column panorama. + +testCase( + "1-column panorama", + { + "panMin": 0, + "panMax": 0, + "tiltMin": -60, + "tiltMax": 60, + "hFov": 60, + "vFov": 60, + "overlap": 0.3, + "preTilt": 0, + }, +) diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc old mode 100755 new mode 100644 index dee28cbc..93678730 --- a/astrobee/behaviors/inspection/src/inspection.cc +++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -20,7 +20,7 @@ // Include inspection library header #include #define PI 3.1415 - +#define EPS 1e-5 /** * \ingroup beh */ @@ -38,7 +38,14 @@ namespace inspection { It also constains functions that allow inspection visualization. */ - Inspection::Inspection(ros::NodeHandle* nh, ff_util::ConfigServer cfg) { + Inspection::Inspection(ros::NodeHandle* nh, ff_util::ConfigServer* cfg) { + // Setug config readers + cfg_ = cfg; + + cfg_cam_.AddFile("cameras.config"); + if (!cfg_cam_.ReadFiles()) + ROS_FATAL("Failed to read config files."); + // Create a transform buffer to listen for transforms tf_listener_ = std::shared_ptr( new tf2_ros::TransformListener(tf_buffer_)); @@ -61,24 +68,29 @@ namespace inspection { "markers/zones_check", 1, true); pub_map_check_ = nh->advertise( "markers/map_check", 1, true); + } + void Inspection::ReadParam() { // Parameters Anomaly survey - opt_distance_ = cfg.Get("optimal_distance"); - dist_resolution_ = cfg.Get("distance_resolution"); - angle_resolution_ = cfg.Get("angle_resolution"); - max_angle_ = cfg.Get("max_angle"); - max_distance_ = cfg.Get("max_distance"); - min_distance_ = cfg.Get("min_distance"); - horizontal_fov_ = cfg.Get("horizontal_fov"); - aspect_ratio_ = cfg.Get("aspect_ratio"); - 
vent_size_x_ = cfg.Get("vent_size_x"); - vent_size_y_ = cfg.Get("vent_size_y"); - vent_to_scicam_rot_ = tf2::Quaternion(cfg.Get("vent_to_sci_cam_rotation_x"), - cfg.Get("vent_to_sci_cam_rotation_y"), - cfg.Get("vent_to_sci_cam_rotation_z"), - cfg.Get("vent_to_sci_cam_rotation_w")); - - // Parameters Panorama survey + opt_distance_ = cfg_->Get("optimal_distance"); + dist_resolution_ = cfg_->Get("distance_resolution"); + angle_resolution_ = cfg_->Get("angle_resolution"); + max_angle_ = cfg_->Get("max_angle"); + max_distance_ = cfg_->Get("max_distance"); + min_distance_ = cfg_->Get("min_distance"); + target_size_x_ = cfg_->Get("target_size_x"); + target_size_y_ = cfg_->Get("target_size_y"); + vent_to_scicam_rot_ = tf2::Quaternion(cfg_->Get("vent_to_sci_cam_rotation_x"), + cfg_->Get("vent_to_sci_cam_rotation_y"), + cfg_->Get("vent_to_sci_cam_rotation_z"), + cfg_->Get("vent_to_sci_cam_rotation_w")); + + // Parameters Panorama survey + pan_min_ = cfg_->Get("pan_min") * PI / 180.0; + pan_max_ = cfg_->Get("pan_max") * PI / 180.0; + tilt_min_ = cfg_->Get("tilt_min") * PI / 180.0; + tilt_max_ = cfg_->Get("tilt_max") * PI / 180.0; + overlap_ = cfg_->Get("overlap"); } // Ensure all clients are connected @@ -116,6 +128,8 @@ namespace inspection { // MOVE ACTION CLIENT // Generate inspection segment bool Inspection::GenSegment(geometry_msgs::Pose goal) { + // Update parameters + ReadParam(); // Insert Offset tf2::Transform vent_transform; vent_transform.setOrigin(tf2::Vector3( @@ -214,22 +228,46 @@ namespace inspection { // Checks the given points agains whether the target is visible // from a camera picture bool Inspection::VisibilityConstraint(geometry_msgs::PoseArray &points) { + // Get camera parameters + Eigen::Matrix3d cam_mat; + float fx, fy, s, cx, cy; + int W, H; + + config_reader::ConfigReader::Table camera(&cfg_cam_, points.header.frame_id.c_str()); + // Read in distorted image size. 
+ if (!camera.GetInt("width", &W)) + fprintf(stderr, "Could not read camera width."); + if (!camera.GetInt("height", &H)) + fprintf(stderr, "Could not read camera height."); + + config_reader::ConfigReader::Table vector(&camera, "intrinsic_matrix"); + for (int i = 0; i < 9; i++) { + if (!vector.GetReal((i + 1), &cam_mat(i / 3, i % 3))) { + fprintf(stderr, "Failed to read vector intrinsic_matrix."); + break; + } + } + // Read in focal length, optical offset and skew + fx = cam_mat(0, 0); + fy = cam_mat(1, 1); + s = cam_mat(0, 1); + cx = cam_mat(0, 2); + cy = cam_mat(1, 2); + // Build the matrix with the points to evaluate Eigen::MatrixXd p(4, 4); - p << vent_size_x_, vent_size_x_, -vent_size_x_, -vent_size_x_, - vent_size_y_, -vent_size_y_, vent_size_y_, -vent_size_y_, + p << target_size_x_, target_size_x_, -target_size_x_, -target_size_x_, + target_size_y_, -target_size_y_, target_size_y_, -target_size_y_, 0, 0, 0, 0, 1, 1, 1, 1; - // Build perspective matrix - float yScale = 1.0F / tan(horizontal_fov_ / 2); - float xScale = yScale / aspect_ratio_; + // Build projection matrix float farmnear = max_distance_ - min_distance_; Eigen::Matrix4d P; - P << xScale, 0, 0, 0, - 0, yScale, 0, 0, - 0, 0, max_distance_ / (farmnear), 1, - 0, 0, -min_distance_ * (max_distance_ / (farmnear)), 1; + P << 2*fx/W, 0, 0, 0, + 2*s/W, 2*fy/H, 0, 0, + 2*(cx/W)-1, 2*(cy/H)-1, max_distance_ / (farmnear), 1, + 0, 0, -min_distance_ * (max_distance_ / (farmnear)), 1; // Go through all the points in sorted segment std::vector::const_iterator it = points.poses.begin(); @@ -454,58 +492,103 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, publisher.publish(msg_visual); } -void Inspection::DrawInspectionFrostum() { -} + void Inspection::DrawInspectionFrostum() { + } + + // Generate the survey for panorama pictures + void Inspection::GeneratePanoramaSurvey(geometry_msgs::PoseArray &points_panorama) { + // Update parameters + ReadParam(); + + geometry_msgs::PoseArray 
panorama_relative; + geometry_msgs::PoseArray panorama_transformed; + geometry_msgs::PoseArray panorama_survey; -// Generate the survey for panorama pictures -void Inspection::GeneratePanoramaSurvey(geometry_msgs::PoseArray &points_panorama) { - geometry_msgs::PoseArray panorama_relative; - geometry_msgs::PoseArray panorama_transformed; - geometry_msgs::PoseArray panorama_survey; - - // Insert point - geometry_msgs::Pose point; - panorama_relative.header.frame_id = "sci_cam"; - point.position.x = 0.0; - point.position.y = 0.0; - point.position.z = 0.0; - tf2::Quaternion panorama_rotation; - // Go through all the points - for (double theta = -PI/2; theta <=PI/2; theta += PI/4) { - for (double phi = 0; phi < 2*PI; phi += PI/4) { - panorama_rotation.setRPY(0, theta, phi); - panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; - point.orientation.x = panorama_rotation.x(); - point.orientation.y = panorama_rotation.y(); - point.orientation.z = panorama_rotation.z(); - point.orientation.w = panorama_rotation.w(); - panorama_relative.poses.push_back(point); - if (theta == -PI/2 || theta == PI/2) + // Insert point + geometry_msgs::Pose point; + panorama_relative.header.frame_id = points_panorama.header.frame_id.c_str(); + point.position.x = 0.0; + point.position.y = 0.0; + point.position.z = 0.0; + tf2::Quaternion panorama_rotation; + + + // Get camera parameters + Eigen::Matrix3d cam_mat; + float fx, fy; + int W, H; + + // Read in distorted image size. 
+ config_reader::ConfigReader::Table camera(&cfg_cam_, points_panorama.header.frame_id.c_str()); + if (!camera.GetInt("width", &W)) { + ROS_ERROR("Could not read camera width."); + } + if (!camera.GetInt("height", &H)) { + ROS_ERROR("Could not read camera height."); + } + config_reader::ConfigReader::Table vector(&camera, "intrinsic_matrix"); + for (int i = 0; i < 9; i++) { + if (!vector.GetReal((i + 1), &cam_mat(i / 3, i % 3))) { + ROS_ERROR("Failed to read vector intrinsic_matrix."); break; + } + } + // Read in focal length + fx = cam_mat(0, 0); + fy = cam_mat(1, 1); + + // Calculate field of views + float h_fov = 2 * atan(W / (2 * fx)); + float v_fov = 2 * atan(H / (2 * fy)); + + // Calculate spacing between pictures + double k_pan = (pan_max_ - pan_min_) / std::ceil((pan_max_ - pan_min_) / (h_fov * (1 - overlap_))); + double k_tilt = (tilt_max_ - tilt_min_) / std::ceil((tilt_max_ - tilt_min_) / (v_fov * (1 - overlap_))); + + // Case where max and min is zero + if (std::isnan(k_pan)) k_pan = PI; + if (std::isnan(k_tilt)) k_tilt = PI; + + // If it's a full 360, skip the last one + if (pan_max_ - pan_min_ >= 2*PI) pan_max_-= 2 * EPS; // Go through all the points + + // Generate all the pan/tilt values + for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { + for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + if ((tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) + break; + } } - } - - // Go through all the panorama points - for (int i = 0; i < 
points_panorama.poses.size(); ++i) { - // Transform the points from the camera reference frame to the robot body - tf2::Transform panorama_pose; - panorama_pose.setOrigin(tf2::Vector3( - points_panorama.poses[i].position.x, - points_panorama.poses[i].position.y, - points_panorama.poses[i].position.z)); - panorama_pose.setRotation(tf2::Quaternion( - points_panorama.poses[i].orientation.x, - points_panorama.poses[i].orientation.y, - points_panorama.poses[i].orientation.z, - points_panorama.poses[i].orientation.w)); - TransformList(panorama_relative, panorama_transformed, panorama_pose); - - panorama_survey.poses.insert(std::end(panorama_survey.poses), std::begin(panorama_transformed.poses), - std::end(panorama_transformed.poses)); + // Go through all the panorama points + for (int i = 0; i < points_panorama.poses.size(); ++i) { + // Transform the points from the camera reference frame to the robot body + + tf2::Transform panorama_pose; + panorama_pose.setOrigin(tf2::Vector3( + points_panorama.poses[i].position.x, + points_panorama.poses[i].position.y, + points_panorama.poses[i].position.z)); + panorama_pose.setRotation(tf2::Quaternion( + points_panorama.poses[i].orientation.x, + points_panorama.poses[i].orientation.y, + points_panorama.poses[i].orientation.z, + points_panorama.poses[i].orientation.w)); + TransformList(panorama_relative, panorama_transformed, panorama_pose); + + panorama_survey.poses.insert(std::end(panorama_survey.poses), std::begin(panorama_transformed.poses), + std::end(panorama_transformed.poses)); + } + points_panorama = panorama_survey; } - points_panorama = panorama_survey; -} } // namespace inspection diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc old mode 100755 new mode 100644 index 3aaee48d..92bd993b --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -325,7 +325,7 @@ class InspectionNode : public 
ff_util::FreeFlyerNodelet { max_motion_retry_number_= cfg_.Get("max_motion_retry_number"); // Initiate inspection library - inspection_ = new Inspection(nh, cfg_); + inspection_ = new Inspection(nh, &cfg_); } // Ensure all clients are connected @@ -374,11 +374,6 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { bool validation = cfg_.Get("enable_validation"); bool boostrapping = cfg_.Get("enable_bootstrapping"); bool immediate = cfg_.Get("enable_immediate"); - bool timesync = cfg_.Get("enable_timesync"); - double desired_vel = cfg_.Get("desired_vel"); - double desired_accel = cfg_.Get("desired_accel"); - double desired_omega = cfg_.Get("desired_omega"); - double desired_alpha = cfg_.Get("desired_alpha"); // Reconfigure the choreographer ff_util::ConfigClient choreographer_cfg(GetPlatformHandle(), NODE_CHOREOGRAPHER); @@ -398,11 +393,6 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { choreographer_cfg.Set("enable_validation", validation); choreographer_cfg.Set("enable_bootstrapping", boostrapping); choreographer_cfg.Set("enable_immediate", immediate); - choreographer_cfg.Set("enable_timesync", timesync); - choreographer_cfg.Set("desired_vel", desired_vel); - choreographer_cfg.Set("desired_accel", desired_accel); - choreographer_cfg.Set("desired_omega", desired_omega); - choreographer_cfg.Set("desired_alpha", desired_alpha); if (!choreographer_cfg.Reconfigure()) { NODELET_ERROR_STREAM("Failed to reconfigure choreographer"); return false; @@ -530,37 +520,32 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Signal an imminent sci cam image sci_cam_req_ = true; - // If using the simulation - if (sim_mode_) { - // Take picture - ff_msgs::CommandArg arg; - std::vector cmd_args; + // Take picture + ff_msgs::CommandArg arg; + std::vector cmd_args; - // The command sends two strings. The first has the app name, - // and the second the command value, encoded as json. + // The command sends two strings. 
The first has the app name, + // and the second the command value, encoded as json. - arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; - arg.s = "gov.nasa.arc.irg.astrobee.sci_cam_image"; - cmd_args.push_back(arg); + arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; + arg.s = "gov.nasa.arc.irg.astrobee.sci_cam_image"; + cmd_args.push_back(arg); - arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; - arg.s = "{\"name\": \"takeSinglePicture\"}"; - cmd_args.push_back(arg); + arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; + arg.s = "{\"name\": \"takePicture\"}"; + cmd_args.push_back(arg); - ff_msgs::CommandStamped cmd; - cmd.header.stamp = ros::Time::now(); - cmd.cmd_name = ff_msgs::CommandConstants::CMD_NAME_CUSTOM_GUEST_SCIENCE; - cmd.cmd_id = "inspection" + std::to_string(ros::Time::now().toSec()); - cmd.cmd_src = "guest science"; - cmd.cmd_origin = "guest science"; - cmd.args = cmd_args; + ff_msgs::CommandStamped cmd; + cmd.header.stamp = ros::Time::now(); + cmd.cmd_name = ff_msgs::CommandConstants::CMD_NAME_CUSTOM_GUEST_SCIENCE; + cmd.cmd_id = "inspection" + std::to_string(ros::Time::now().toSec()); + cmd.cmd_src = "guest science"; + cmd.cmd_origin = "guest science"; + cmd.args = cmd_args; - pub_guest_sci_.publish(cmd); - } else { - // If using the real robot - FILE *f = popen("ssh -t -t llp /opt/astrobee/ops/sci_cam_img_control.bash single pub_ros", "r"); - } + pub_guest_sci_.publish(cmd); + // Timer for the sci cam camera ros::Timer sci_cam_timeout_ = nh_->createTimer(ros::Duration(5), &InspectionNode::SciCamTimeout, this, true, false); @@ -737,8 +722,10 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Callback to handle reconfiguration requests bool ReconfigureCallback(dynamic_reconfigure::Config & config) { - if ( fsm_.GetState() == STATE::WAITING) - return cfg_.Reconfigure(config); + if (cfg_.Reconfigure(config)) { + inspection_->ReadParam(); + return true; + } return false; } diff --git 
a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index 79baf25c..ac5ed55e 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -30,6 +30,8 @@ #include #include #include +#include + // Action #include @@ -63,12 +65,21 @@ DEFINE_bool(geometry, false, "Send the inspection command"); DEFINE_bool(panorama, false, "Send the inspection command"); DEFINE_bool(volumetric, false, "Send the inspection command"); +// Configurable Parameters +DEFINE_string(camera, "sci_cam", "Camera to use"); +DEFINE_double(tilt_max, 90.0, "Panorama: maximum tilt"); +DEFINE_double(tilt_min, -90.0, "Panorama: minimum tilt"); +DEFINE_double(pan_max, 180.0, "Panorama: maximum pan"); +DEFINE_double(pan_min, -180.0, "Panorama: minimum pan"); +DEFINE_double(overlap, 0.5, "Panorama: overlap between images"); + +// Plan files DEFINE_string(anomaly_poses, "/resources/vent_jpm.txt", "Vent pose list to inspect"); DEFINE_string(geometry_poses, "/resources/survey_bay_6.txt", "Geometry poses list to map"); DEFINE_string(panorama_poses, "/resources/panorama_jpm.txt", "Panorama poses list to map"); DEFINE_string(volumetric_poses, "/resources/wifi_jpm.txt", "Wifi poses list to map"); -// Timeout values +// Timeout values for action DEFINE_double(connect, 10.0, "Action connect timeout"); DEFINE_double(active, 10.0, "Action active timeout"); DEFINE_double(response, 200.0, "Action response timeout"); @@ -87,7 +98,7 @@ bool has_only_whitespace_or_comments(const std::string & str) { void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { geometry_msgs::Pose pose; - goal.inspect_poses.header.frame_id = "world"; + goal.inspect_poses.header.frame_id = FLAGS_camera; // Read file geometry std::ifstream ifs((file).c_str()); if (!ifs.is_open()) { @@ -303,9 +314,19 @@ int main(int argc, char *argv[]) { std::placeholders::_1, std::placeholders::_2)); 
client.SetConnectedCallback(std::bind(ConnectedCallback, &client)); client.Create(&nh, ACTION_BEHAVIORS_INSPECTION); - // Print out a status message - std::cout << "\r " - << "\rState: CONNECTING" << std::flush; + // Configure inspection parameters + if (FLAGS_panorama) { + ff_util::ConfigClient cfg(&nh, NODE_INSPECTION); + cfg.Set("pan_min", FLAGS_pan_min); + cfg.Set("pan_max", FLAGS_pan_max); + cfg.Set("tilt_min", FLAGS_tilt_min); + cfg.Set("tilt_max", FLAGS_tilt_max); + cfg.Set("overlap", FLAGS_overlap); + if (!cfg.Reconfigure()) { + std::cout << "Could not reconfigure the inspection node " << std::endl; + ros::shutdown(); + } + } // Synchronous mode ros::spin(); // Finish commandline flags diff --git a/astrobee/behaviors/readme.md b/astrobee/behaviors/readme.md index cf40b0be..8de60c00 100644 --- a/astrobee/behaviors/readme.md +++ b/astrobee/behaviors/readme.md @@ -1,8 +1,5 @@ \page beh Behaviors - -## Overview - Behaviors are high-level actions that support complex maneuvers, such as control of the arm, docking, and perching. They are built upon the more more generic actions offered by other subsystems, such as the mobility subsystem's "motion" action, or the localization manager's "switch" action. \subpage inspection diff --git a/astrobee/hardware/readme.md b/astrobee/hardware/readme.md index 4680da30..ee311ba6 100644 --- a/astrobee/hardware/readme.md +++ b/astrobee/hardware/readme.md @@ -1,9 +1,3 @@ \page hw Hardware - -## Overview - -Hardware drivers - -\subpage wifi_driver -\subpage acoustics_camera \ No newline at end of file +\subpage wifi_driver : Hardware utilization nodes \ No newline at end of file diff --git a/astrobee/hardware/wifi/readme.md b/astrobee/hardware/wifi/readme.md index 7abb1c61..e03146dd 100644 --- a/astrobee/hardware/wifi/readme.md +++ b/astrobee/hardware/wifi/readme.md @@ -1,7 +1,5 @@ \page wifi_driver Wifi Driver -# Overview - This package provides the driver for publishing wifi strength info. 
I is based on the library available in https://github.com/bmegli/wifi-scan, adapted to ROS for this project. Because the function that scans all the networks needs network admin rights, there are 2 modes in which this driver can run. The modes can be set as: @@ -23,14 +21,14 @@ General Parameters: interface_name: to find ou the interface name, one can find out the interface name by running ifconfig in the cmd line. -# Scan Station +### Scan Station Publishes information to the topic hw/wifi/station. It only includes signal strength of the wifi network to which the robot is currently connected to. Scan Station Parameters: time_scan_station: update rate of the station scan, can be set upt to 50ms. -# Scan All +### Scan All Publishes information to the topic hw/wifi/all. Scan All Parameters: @@ -42,7 +40,7 @@ Scan All Parameters: All data is published as messages on ROS topics using the prefix hw/wifi. -# Lib mnl +### Lib mnl This package is needed in hardware/wifi node, such that we can scan the wifi networks. If you are missing this package, and have sudo rights, do: diff --git a/astrobee/readme.md b/astrobee/readme.md index b561418c..89e60ac6 100644 --- a/astrobee/readme.md +++ b/astrobee/readme.md @@ -1,12 +1,12 @@ \page astrobee Astrobee -This folder contains the ISAAC additions to the Astrobee FSW +The Astrobeemodule contains the ISAAC additions to the Astrobee FSW -\subpage beh : High-level actions that support complex maneuvers. +\subpage beh : High-level actions that support complex maneuvers. The Inspection behavior facilitates data collection events (with options as geometric survey, panorama survey, volumetric survey or anomaly survey); the cargo transport behavior contains the pick and drop off cargo actions. -\subpage hw : Hardware drivers +\subpage hw : Sensor utilization, containing the WiFi reader. 
-\subpage sim : Gazebo plugins for simulation +\subpage sim : Gazebo plugins for simulation including acoustic and heat camera, RFID reader and emmiter, as well as WiFi reader and emmiters. -\subpage gs_action_helper : Guest science action helper \ No newline at end of file +\subpage gs_action_helper : Guest science action helper used to communicate with the ground through DDS messages \ No newline at end of file diff --git a/astrobee/simulation/acoustics_cam/nodes/acoustics_cam b/astrobee/simulation/acoustics_cam/nodes/acoustics_cam index dfec1d61..cd0302a7 100755 --- a/astrobee/simulation/acoustics_cam/nodes/acoustics_cam +++ b/astrobee/simulation/acoustics_cam/nodes/acoustics_cam @@ -28,24 +28,32 @@ the result as a camera image, called acoustics_cam. # python 2/3 compatibility from __future__ import print_function -import copy, os, json, time, sys, cv2, math, re, threading -import numpy as np -import scipy.interpolate -from scipy.io import wavfile -from scipy.interpolate import RegularGridInterpolator +import copy +import json +import math +import os +import re +import sys +import threading +import time + +import cv2 +import geometry_msgs.msg import matplotlib.pyplot as plt +import numpy as np import pyroomacoustics as pra +import roslib # ROS import rospy -import roslib -import tf -import geometry_msgs.msg +import scipy.interpolate import std_msgs.msg -from sensor_msgs.msg import CameraInfo -from sensor_msgs.msg import Image +import tf from cv_bridge import CvBridge from ff_msgs.msg import CommandStamped +from scipy.interpolate import RegularGridInterpolator +from scipy.io import wavfile +from sensor_msgs.msg import CameraInfo, Image # Making this larger computes the spatial response at a denser set of directions. 
NUM_ANGLES = 4 * 45 * 22 # 180 * 90 diff --git a/astrobee/simulation/acoustics_cam/readme.md b/astrobee/simulation/acoustics_cam/readme.md index ef4eb097..a57fc13e 100644 --- a/astrobee/simulation/acoustics_cam/readme.md +++ b/astrobee/simulation/acoustics_cam/readme.md @@ -1,8 +1,6 @@ \page acoustics_camera Acoustics camera -# Acoustics camera - -## Overview +### Overview This camera simulates a microphone array, or, in other words, a directional microphone. Its readings are assembled into a spherical @@ -19,7 +17,7 @@ from. The reading at a pixel of that camera is the value of the microphone measurement in the direction of the ray going from the microphone (and camera) center through that pixel. -## Installation +### Installation The acoustics camera depends on the pyroomacoustics package. This package can be installed together with its dependencies in a Python @@ -33,7 +31,7 @@ It would normally install itself in: $HOME/.local/lib/python2.7/site-packages/pyroomacoustics -## Running the acoustics camera +### Running the acoustics camera The acoustics camera ROS node can be run as part of the simulator as: @@ -68,7 +66,7 @@ mode it will only create a plot of the acoustics cam image. The sources of sounds will be represented as crosses in this plot, and the camera (microphone) position will be shown as a star. -## ROS communication +### ROS communication The acoustics camera subscribes to @@ -93,7 +91,7 @@ must use the app name "gov.nasa.arc.irg.astrobee.acoustics_cam_image" (which is the "s" field in the first command argument) for it to be processed. 
-## Configuration +### Configuration The behavior of this camera is described in diff --git a/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py b/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py index 007054dc..3351cb70 100644 --- a/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py +++ b/astrobee/simulation/acoustics_cam/src/generate_pure_tones.py @@ -19,17 +19,18 @@ # under the License. import numpy as np +from matplotlib import pyplot as plt +from scipy.interpolate import RegularGridInterpolator from scipy.io import wavfile from scipy.signal import welch -from scipy.interpolate import RegularGridInterpolator -from matplotlib import pyplot as plt fs = 32000 # sample rate, Hz -T = 30. # sample duration, seconds +T = 30.0 # sample duration, seconds freqs = [12333, 10533] # Hz rng = np.random.RandomState(23) + def get_signal(F, fs, T): t = np.arange(fs * T) / fs weight_sum = 0 @@ -43,32 +44,33 @@ def get_signal(F, fs, T): weight_sum += weight signal += weight * np.clip(1.0 / 3 * rng.normal(size=t.shape), -1, 1) - return (32768 / weight_sum * signal).astype('int16') + return (32768 / weight_sum * signal).astype("int16") + for F in freqs: signal = get_signal(F, fs, T) - fname = 'test_sounds/tone%s.wav' % F + fname = "test_sounds/tone%s.wav" % F wavfile.write(fname, fs, signal) - print('wrote %s' % fname) + print("wrote %s" % fname) for F in freqs: - fname = 'test_sounds/tone%s.wav' % F + fname = "test_sounds/tone%s.wav" % F fs, signal = wavfile.read(fname) - welch_F, Pxx = welch(signal, fs, nperseg=8192, average='median') + welch_F, Pxx = welch(signal, fs, nperseg=8192, average="median") plt.semilogy(welch_F, Pxx) interp = RegularGridInterpolator([welch_F], Pxx) - print('PSD for %s:' % fname) + print("PSD for %s:" % fname) for F in freqs: - print(' @ %s Hz: %s' % (F, interp([F]))) + print(" @ %s Hz: %s" % (F, interp([F]))) plt.legend([str(F) for F in freqs]) -plt.xlabel('Frequency (Hz)') -plt.ylabel('PSD ($V^2$ / Hz)') 
+plt.xlabel("Frequency (Hz)") +plt.ylabel("PSD ($V^2$ / Hz)") -fname = 'tones.png' +fname = "tones.png" plt.savefig(fname) -print('wrote %s' % fname) +print("wrote %s" % fname) plt.close() diff --git a/astrobee/simulation/gazebo/CMakeLists.txt b/astrobee/simulation/isaac_gazebo/CMakeLists.txt similarity index 100% rename from astrobee/simulation/gazebo/CMakeLists.txt rename to astrobee/simulation/isaac_gazebo/CMakeLists.txt diff --git a/astrobee/simulation/gazebo/launch/spawn_isaac.launch b/astrobee/simulation/isaac_gazebo/launch/spawn_isaac.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/spawn_isaac.launch rename to astrobee/simulation/isaac_gazebo/launch/spawn_isaac.launch diff --git a/astrobee/simulation/gazebo/launch/spawn_object.launch b/astrobee/simulation/isaac_gazebo/launch/spawn_object.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/spawn_object.launch rename to astrobee/simulation/isaac_gazebo/launch/spawn_object.launch diff --git a/astrobee/simulation/gazebo/launch/start_simulation.launch b/astrobee/simulation/isaac_gazebo/launch/start_simulation.launch similarity index 100% rename from astrobee/simulation/gazebo/launch/start_simulation.launch rename to astrobee/simulation/isaac_gazebo/launch/start_simulation.launch diff --git a/astrobee/simulation/gazebo/package.xml b/astrobee/simulation/isaac_gazebo/package.xml similarity index 100% rename from astrobee/simulation/gazebo/package.xml rename to astrobee/simulation/isaac_gazebo/package.xml diff --git a/astrobee/simulation/gazebo/readme.md b/astrobee/simulation/isaac_gazebo/readme.md similarity index 98% rename from astrobee/simulation/gazebo/readme.md rename to astrobee/simulation/isaac_gazebo/readme.md index 3ca8975f..79661374 100644 --- a/astrobee/simulation/gazebo/readme.md +++ b/astrobee/simulation/isaac_gazebo/readme.md @@ -5,7 +5,7 @@ Simulation contains the packages where the dense maps are built This page describes the isaac gazebo plugins. 
-# Heat cam +### Heat cam This plugin simulates a heat-detecting camera, with the color in the images it produces suggestive of the temperature on the surface of the @@ -72,3 +72,5 @@ at a specific time, or to take pictures continuously. Such a command must use the app name "gov.nasa.arc.irg.astrobee.heat_cam_image" (which is the "s" field in the first command argument) for it to be processed. + +\subpage acoustics_camera \ No newline at end of file diff --git a/astrobee/simulation/gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_model_plugin_cargo/gazebo_model_plugin_cargo.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_receiver/gazebo_sensor_plugin_RFID_receiver.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_RFID_tag/gazebo_sensor_plugin_RFID_tag.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc 
b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_air_quality/gazebo_sensor_plugin_air_quality.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_heat_cam/gazebo_sensor_plugin_heat_cam.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_receiver/gazebo_sensor_plugin_wifi_receiver.cc diff --git a/astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc b/astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc similarity index 100% rename from astrobee/simulation/gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc rename to astrobee/simulation/isaac_gazebo/src/gazebo_sensor_plugin_wifi_transmitter/gazebo_sensor_plugin_wifi_transmitter.cc diff --git a/astrobee/simulation/gazebo/worlds/granite.world b/astrobee/simulation/isaac_gazebo/worlds/granite.world similarity index 100% rename from 
astrobee/simulation/gazebo/worlds/granite.world rename to astrobee/simulation/isaac_gazebo/worlds/granite.world diff --git a/astrobee/simulation/gazebo/worlds/iss.world b/astrobee/simulation/isaac_gazebo/worlds/iss.world similarity index 100% rename from astrobee/simulation/gazebo/worlds/iss.world rename to astrobee/simulation/isaac_gazebo/worlds/iss.world diff --git a/astrobee/simulation/gazebo/worlds/r2.world b/astrobee/simulation/isaac_gazebo/worlds/r2.world similarity index 100% rename from astrobee/simulation/gazebo/worlds/r2.world rename to astrobee/simulation/isaac_gazebo/worlds/r2.world diff --git a/cmake/catkin2Config.cmake b/cmake/catkin2Config.cmake index 3229a1d0..90bd6f89 100644 --- a/cmake/catkin2Config.cmake +++ b/cmake/catkin2Config.cmake @@ -1,15 +1,14 @@ -# Copyright (c) 2021, United States Government, as represented by the +# Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -28,6 +27,7 @@ # :outvar _INCLUDE_DIRS/_LIBRARY_DIRS/_LIBRARY: # contains the include dirs / library dirs / libraries of the searched component . 
+find_package(catkin REQUIRED) # defines catkin_DIR if(CATKIN_TOPLEVEL_FIND_PACKAGE OR NOT CATKIN_TOPLEVEL) set(catkin_EXTRAS_DIR "${catkin_DIR}") @@ -117,8 +117,8 @@ if(catkin2_FIND_COMPONENTS) # paths. This makes it impossible to cross compile unless we fix # the paths like so below. if (USE_CTC) - if (${component}_LIBRARIES) - set(temp_LIBRARIES) + if (${component}_LIBRARIES) + set(temp_LIBRARIES) foreach(library ${${component}_LIBRARIES}) string(REGEX REPLACE "^/usr/lib" "${ARM_CHROOT_DIR}/usr/lib" library ${library}) string(REPLACE "i386-linux-gnu" "arm-linux-gnueabihf" library ${library}) diff --git a/debian/changelog b/debian/changelog index bea60adc..1ddfd3e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,26 +1,33 @@ +isaac (0.2.3) UNRELEASED; urgency=medium + + * Panorama picture taking mode supported + * Cargo transport simulation support + + -- Marina Moreira Mon, 13 Dec 2021 15:51:01 -0800 + isaac (0.2.2) testing; urgency=medium * Compatible wit 0.14.x versions, made for ISAAC 3 activity - -- Astrobee Flight Software Thu, 14 Apr 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 14 Apr 2021 10:37:25 -0800 isaac (0.2.1) testing; urgency=medium * Compatible wit 0.14.x versions, made for ISAAC 2 activity - -- Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 isaac (0.2.0) testing; urgency=medium - * Compatible wit 0.14.x versions + * Compatible wit 0.14.x versions, made for ISAAC 1 activity - -- Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 isaac (0.1.0) testing; urgency=medium - * Initial release + * Initial release. Demo June 2020. 
- -- Astrobee Flight Software Thu, 21 Jan 2021 10:37:25 -0800 + -- ISAAC Flight Software Thu, 21 Jan 2021 10:37:25 -0800 diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index adf10910..6823fdb6 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -175,6 +175,10 @@ add_executable(camera_refiner tools/camera_refiner.cc) target_link_libraries(camera_refiner geometry_mapper_lib gflags glog) +add_executable(camera_refiner2 tools/camera_refiner2.cc) +target_link_libraries(camera_refiner2 + geometry_mapper_lib gflags glog) + add_executable(extract_pc_from_bag tools/extract_pc_from_bag.cc) target_link_libraries(extract_pc_from_bag geometry_mapper_lib gflags glog) @@ -187,6 +191,10 @@ add_executable(process_bag tools/process_bag.cc) target_link_libraries(process_bag geometry_mapper_lib gflags glog) +add_executable(colorize_bag_images tools/colorize_bag_images.cc) +target_link_libraries(colorize_bag_images + geometry_mapper_lib gflags glog) + add_executable(test_correct tools/test_correct.cc) target_link_libraries(test_correct geometry_mapper_lib gflags glog) diff --git a/dense_map/geometry_mapper/include/dense_map_ros_utils.h b/dense_map/geometry_mapper/include/dense_map_ros_utils.h index c647d94b..bd89a9c0 100644 --- a/dense_map/geometry_mapper/include/dense_map_ros_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_ros_utils.h @@ -50,6 +50,13 @@ void readBagPoses(std::string const& bag_file, std::string const& topic, Stamped void readBagImageTimestamps(std::string const& bag_file, std::string const& topic, std::vector& timestamps); +// Given a bag view, for each topic in the view read the vector of +// messages for that topic, sorted by message header timestamp. Only +// the following sensor types are supported: sensor_msgs::Image, +// sensor_msgs::CompressedImage, and sensor_msgs::PointCloud2. 
+void indexMessages(rosbag::View& view, // view can't be made const + std::map>& bag_map); + void readExifFromBag(std::vector const& bag_msgs, std::map>& exif); // Find an image at the given timestamp or right after it. We assume diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 080bf5ab..24b03d0c 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -41,7 +41,7 @@ namespace dense_map { -const int NUM_SCALE_PARAMS = 1; +const int NUM_SCALAR_PARAMS = 1; const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y const int NUM_RESIDUALS = 2; // Same as pixel size const int NUM_XYZ_PARAMS = 3; @@ -190,15 +190,21 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b // Must always have NUM_EXIF the last. enum ExifData { TIMESTAMP = 0, EXPOSURE_TIME, ISO, APERTURE, FOCAL_LENGTH, NUM_EXIF }; -// Triangulate rays emanating from given undistorted and centered pixels +// Triangulate two rays emanating from given undistorted and centered pixels Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, Eigen::Vector2d const& pix2); -// A debug utility for saving a camera in a format ASP understands. +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec); + +// A utility for saving a camera in a format ASP understands. // TODO(oalexan1): Expose the sci cam intrinsics rather than having // them hard-coded. 
-void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, +void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, + std::string const& output_dir, double curr_time, std::string const& suffix); } // namespace dense_map diff --git a/dense_map/geometry_mapper/include/happly.h b/dense_map/geometry_mapper/include/happly.h new file mode 100644 index 00000000..01d84502 --- /dev/null +++ b/dense_map/geometry_mapper/include/happly.h @@ -0,0 +1,2013 @@ +#pragma once + +/* A header-only implementation of the .ply file format. + * https://github.com/nmwsharp/happly + * By Nicholas Sharp - nsharp@cs.cmu.edu + * + * Version 2, July 20, 2019 + */ + +/* +MIT License + +Copyright (c) 2018 Nick Sharp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + + +// clang-format off +/* + + === Changelog === + + Significant changes to the file recorded here. 
+ + - Version 5 (Aug 22, 2020) Minor: skip blank lines before properties in ASCII files + - Version 4 (Sep 11, 2019) Change internal list format to be flat. Other small perf fixes and cleanup. + - Version 3 (Aug 1, 2019) Add support for big endian and obj_info + - Version 2 (July 20, 2019) Catch exceptions by const reference. + - Version 1 (undated) Initial version. Unnamed changes before version numbering. + +*/ +// clang-format on + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// General namespace wrapping all Happly things. +namespace happly { + +// Enum specifying binary or ASCII filetypes. Binary can be little-endian +// (default) or big endian. +enum class DataFormat { ASCII, Binary, BinaryBigEndian }; + +// Type name strings +// clang-format off +template std::string typeName() { return "unknown"; } +template<> inline std::string typeName() { return "char"; } +template<> inline std::string typeName() { return "uchar"; } +template<> inline std::string typeName() { return "short"; } +template<> inline std::string typeName() { return "ushort"; } +template<> inline std::string typeName() { return "int"; } +template<> inline std::string typeName() { return "uint"; } +template<> inline std::string typeName() { return "float"; } +template<> inline std::string typeName() { return "double"; } + +// Template hackery that makes getProperty() and friends pretty while automatically picking up smaller types +namespace { + +// A pointer for the equivalent/smaller equivalent of a type (eg. 
when a double is requested a float works too, etc) +// long int is intentionally absent to avoid platform confusion +template struct TypeChain { bool hasChildType = false; typedef T type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int32_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int16_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef int8_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint32_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint16_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef uint8_t type; }; +template <> struct TypeChain { bool hasChildType = true; typedef float type; }; + +template struct CanonicalName { typedef T type; }; +template <> struct CanonicalName { typedef int8_t type; }; +template <> struct CanonicalName { typedef uint8_t type; }; +template <> struct CanonicalName { typedef std::conditional::type, int>::value, uint32_t, uint64_t>::type type; }; + +// Used to change behavior of >> for 8bit ints, which does not do what we want. +template struct SerializeType { typedef T type; }; +template <> struct SerializeType { typedef int32_t type; }; +template <> struct SerializeType< int8_t> { typedef int32_t type; }; + +// Give address only if types are same (used below when conditionally copying data) +// last int/char arg is to resolve ambiguous overloads, just always pass 0 and the int version will be preferred +template +S* addressIfSame(T&, char) { + throw std::runtime_error("tried to take address for types that are not same"); + return nullptr;} +template +S* addressIfSame(S& t, int) {return &t;} + +// clang-format on +} // namespace + +/** + * @brief A generic property, which is associated with some element. Can be plain Property or a ListProperty, of some + * type. 
Generally, the user should not need to interact with these directly, but they are exposed in case someone + * wants to get clever. + */ +class Property { + +public: + /** + * @brief Create a new Property with the given name. + * + * @param name_ + */ + Property(const std::string& name_) : name(name_){}; + virtual ~Property(){}; + + std::string name; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. + */ + virtual void reserve(size_t capacity) = 0; + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. + */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) = 0; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) = 0; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNextBigEndian(std::istream& stream) = 0; + + /** + * @brief (reading) Write a header entry for this property. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) = 0; + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. 
+ */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) = 0; + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() = 0; + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() = 0; +}; + +namespace { + +/** + * Check if the platform is little endian. + * (not foolproof, but will work on most platforms) + * + * @return true if little endian + */ +bool isLittleEndian() { + int32_t oneVal = 0x1; + char* numPtr = (char*)&oneVal; + return (numPtr[0] == 1); +} + +/** + * Swap endianness. + * + * @param value Value to swap. + * + * @return Swapped value. + */ +template +T swapEndian(T val) { + char* bytes = reinterpret_cast(&val); + for (unsigned int i = 0; i < sizeof(val) / 2; i++) { + std::swap(bytes[sizeof(val) - 1 - i], bytes[i]); + } + return val; +} + + +// Unpack flattened list from the convention used in TypedListProperty +template +std::vector> unflattenList(const std::vector& flatList, const std::vector flatListStarts) { + size_t outerCount = flatListStarts.size() - 1; + + // Put the output here + std::vector> outLists(outerCount); + + if (outerCount == 0) { + return outLists; // quick out for empty + } + + // Copy each sublist + for (size_t iOuter = 0; iOuter < outerCount; iOuter++) { + size_t iFlatStart = flatListStarts[iOuter]; + size_t iFlatEnd = flatListStarts[iOuter + 1]; + outLists[iOuter].insert(outLists[iOuter].begin(), flatList.begin() + iFlatStart, flatList.begin() + iFlatEnd); + } + + return outLists; +} + + +}; // namespace + + +/** + * @brief A property which takes a single value (not a list). 
+ */ +template +class TypedProperty : public Property { + +public: + /** + * @brief Create a new Property with the given name. + * + * @param name_ + */ + TypedProperty(const std::string& name_) : Property(name_) { + if (typeName() == "unknown") { + // TODO should really be a compile-time error + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + }; + + /** + * @brief Create a new property and initialize with data. + * + * @param name_ + * @param data_ + */ + TypedProperty(const std::string& name_, const std::vector& data_) : Property(name_), data(data_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + }; + + virtual ~TypedProperty() override{}; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. + */ + virtual void reserve(size_t capacity) override { data.reserve(capacity); } + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. + */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) override { + data.emplace_back(); + std::istringstream iss(tokens[currEntry]); + typename SerializeType::type tmp; // usually the same type as T + iss >> tmp; + data.back() = tmp; + currEntry++; + }; + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) override { + data.emplace_back(); + stream.read((char*)&data.back(), sizeof(T)); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. 
+ */ + virtual void readNextBigEndian(std::istream& stream) override { + data.emplace_back(); + stream.read((char*)&data.back(), sizeof(T)); + data.back() = swapEndian(data.back()); + } + + /** + * @brief (reading) Write a header entry for this property. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) override { + outStream << "property " << typeName() << " " << name << "\n"; + } + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) override { + outStream.precision(std::numeric_limits::max_digits10); + outStream << static_cast::type>(data[iElement]); // case is usually a no-op + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) override { + outStream.write((char*)&data[iElement], sizeof(T)); + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. 
+ */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) override { + auto value = swapEndian(data[iElement]); + outStream.write((char*)&value, sizeof(T)); + } + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() override { return data.size(); } + + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() override { return typeName(); } + + /** + * @brief The actual data contained in the property + */ + std::vector data; +}; + + +/** + * @brief A property which is a list of value (eg, 3 doubles). Note that lists are always variable length per-element. + */ +template +class TypedListProperty : public Property { + +public: + /** + * @brief Create a new Property with the given name. + * + * @param name_ + */ + TypedListProperty(const std::string& name_, int listCountBytes_) : Property(name_), listCountBytes(listCountBytes_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + + flattenedIndexStart.push_back(0); + }; + + /** + * @brief Create a new property and initialize with data + * + * @param name_ + * @param data_ + */ + TypedListProperty(const std::string& name_, const std::vector>& data_) : Property(name_) { + if (typeName() == "unknown") { + throw std::runtime_error("Attempted property type does not match any type defined by the .ply format."); + } + + // Populate list with data + flattenedIndexStart.push_back(0); + for (const std::vector& vec : data_) { + for (const T& val : vec) { + flattenedData.emplace_back(val); + } + flattenedIndexStart.push_back(flattenedData.size()); + } + }; + + virtual ~TypedListProperty() override{}; + + /** + * @brief Reserve memory. + * + * @param capacity Expected number of elements. 
+ */ + virtual void reserve(size_t capacity) override { + flattenedData.reserve(3 * capacity); // optimize for triangle meshes + flattenedIndexStart.reserve(capacity + 1); + } + + /** + * @brief (ASCII reading) Parse out the next value of this property from a list of tokens. + * + * @param tokens The list of property tokens for the element. + * @param currEntry Index in to tokens, updated after this property is read. + */ + virtual void parseNext(const std::vector& tokens, size_t& currEntry) override { + + std::istringstream iss(tokens[currEntry]); + size_t count; + iss >> count; + currEntry++; + + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + for (size_t iFlat = currSize; iFlat < afterSize; iFlat++) { + std::istringstream iss(tokens[currEntry]); + typename SerializeType::type tmp; // usually the same type as T + iss >> tmp; + flattenedData[iFlat] = tmp; + currEntry++; + } + flattenedIndexStart.emplace_back(afterSize); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. + */ + virtual void readNext(std::istream& stream) override { + + // Read the size of the list + size_t count = 0; + stream.read(((char*)&count), listCountBytes); + + // Read list elements + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + if (count > 0) { + stream.read((char*)&flattenedData[currSize], count * sizeof(T)); + } + flattenedIndexStart.emplace_back(afterSize); + } + + /** + * @brief (binary reading) Copy the next value of this property from a stream of bits. + * + * @param stream Stream to read from. 
+ */ + virtual void readNextBigEndian(std::istream& stream) override { + + // Read the size of the list + size_t count = 0; + stream.read(((char*)&count), listCountBytes); + if (listCountBytes == 8) { + count = (size_t)swapEndian((uint64_t)count); + } else if (listCountBytes == 4) { + count = (size_t)swapEndian((uint32_t)count); + } else if (listCountBytes == 2) { + count = (size_t)swapEndian((uint16_t)count); + } + + // Read list elements + size_t currSize = flattenedData.size(); + size_t afterSize = currSize + count; + flattenedData.resize(afterSize); + if (count > 0) { + stream.read((char*)&flattenedData[currSize], count * sizeof(T)); + } + flattenedIndexStart.emplace_back(afterSize); + + // Swap endian order of list elements + for (size_t iFlat = currSize; iFlat < afterSize; iFlat++) { + flattenedData[iFlat] = swapEndian(flattenedData[iFlat]); + } + } + + /** + * @brief (reading) Write a header entry for this property. Note that we already use "uchar" for the list count type. + * + * @param outStream Stream to write to. + */ + virtual void writeHeader(std::ostream& outStream) override { + // NOTE: We ALWAYS use uchar as the list count output type + outStream << "property list uchar " << typeName() << " " << name << "\n"; + } + + /** + * @brief (ASCII writing) write this property for some element to a stream in plaintext + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataASCII(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. 
See note in README."); + } + + outStream << dataCount; + outStream.precision(std::numeric_limits::max_digits10); + for (size_t iFlat = dataStart; iFlat < dataEnd; iFlat++) { + outStream << " " << static_cast::type>(flattenedData[iFlat]); // cast is usually a no-op + } + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinary(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. See note in README."); + } + uint8_t count = static_cast(dataCount); + + outStream.write((char*)&count, sizeof(uint8_t)); + outStream.write((char*)&flattenedData[dataStart], count * sizeof(T)); + } + + /** + * @brief (binary writing) copy the bits of this property for some element to a stream + * + * @param outStream Stream to write to. + * @param iElement index of the element to write. + */ + virtual void writeDataBinaryBigEndian(std::ostream& outStream, size_t iElement) override { + size_t dataStart = flattenedIndexStart[iElement]; + size_t dataEnd = flattenedIndexStart[iElement + 1]; + + // Get the number of list elements as a uchar, and ensure the value fits + size_t dataCount = dataEnd - dataStart; + if (dataCount > std::numeric_limits::max()) { + throw std::runtime_error( + "List property has an element with more entries than fit in a uchar. 
See note in README."); + } + uint8_t count = static_cast(dataCount); + + outStream.write((char*)&count, sizeof(uint8_t)); + for (size_t iFlat = dataStart; iFlat < dataEnd; iFlat++) { + T value = swapEndian(flattenedData[iFlat]); + outStream.write((char*)&value, sizeof(T)); + } + } + + /** + * @brief Number of element entries for this property + * + * @return + */ + virtual size_t size() override { return flattenedIndexStart.size() - 1; } + + + /** + * @brief A string naming the type of the property + * + * @return + */ + virtual std::string propertyTypeName() override { return typeName(); } + + /** + * @brief The (flattened) data for the property, as formed by concatenating all of the individual element lists + * together. + */ + std::vector flattenedData; + + /** + * @brief Indices in to flattenedData. The i'th element gives the index in to flattenedData where the element's data + * begins. A final entry is included which is the length of flattenedData. Size is N_elem + 1. + */ + std::vector flattenedIndexStart; + + /** + * @brief The number of bytes used to store the count for lists of data. + */ + int listCountBytes = -1; +}; + + +/** + * @brief Helper function to construct a new property of the appropriate type. + * + * @param name The name of the property to construct. + * @param typeStr A string naming the type according to the format. + * @param isList Is this a plain property, or a list property? + * @param listCountTypeStr If a list property, the type of the count varible. + * + * @return A new Property with the proper type. 
+ */ +inline std::unique_ptr createPropertyWithType(const std::string& name, const std::string& typeStr, + bool isList, const std::string& listCountTypeStr) { + + // == Figure out how many bytes the list count field has, if this is a list type + // Note: some files seem to use signed types here, we read the width but always parse as if unsigned + int listCountBytes = -1; + if (isList) { + if (listCountTypeStr == "uchar" || listCountTypeStr == "uint8" || listCountTypeStr == "char" || + listCountTypeStr == "int8") { + listCountBytes = 1; + } else if (listCountTypeStr == "ushort" || listCountTypeStr == "uint16" || listCountTypeStr == "short" || + listCountTypeStr == "int16") { + listCountBytes = 2; + } else if (listCountTypeStr == "uint" || listCountTypeStr == "uint32" || listCountTypeStr == "int" || + listCountTypeStr == "int32") { + listCountBytes = 4; + } else { + throw std::runtime_error("Unrecognized list count type: " + listCountTypeStr); + } + } + + // = Unsigned int + + // 8 bit unsigned + if (typeStr == "uchar" || typeStr == "uint8") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 16 bit unsigned + else if (typeStr == "ushort" || typeStr == "uint16") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 32 bit unsigned + else if (typeStr == "uint" || typeStr == "uint32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // = Signed int + + // 8 bit signed + if (typeStr == "char" || typeStr == "int8") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 16 bit signed + else if (typeStr == "short" || typeStr == 
"int16") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 32 bit signed + else if (typeStr == "int" || typeStr == "int32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // = Float + + // 32 bit float + else if (typeStr == "float" || typeStr == "float32") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + // 64 bit float + else if (typeStr == "double" || typeStr == "float64") { + if (isList) { + return std::unique_ptr(new TypedListProperty(name, listCountBytes)); + } else { + return std::unique_ptr(new TypedProperty(name)); + } + } + + else { + throw std::runtime_error("Data type: " + typeStr + " cannot be mapped to .ply format"); + } +} + +/** + * @brief An element (more properly an element type) in the .ply object. Tracks the name of the elemnt type (eg, + * "vertices"), the number of elements of that type (eg, 1244), and any properties associated with that element (eg, + * "position", "color"). + */ +class Element { + +public: + /** + * @brief Create a new element type. + * + * @param name_ Name of the element type (eg, "vertices") + * @param count_ Number of instances of this element. + */ + Element(const std::string& name_, size_t count_) : name(name_), count(count_) {} + + std::string name; + size_t count; + std::vector> properties; + + /** + * @brief Check if a property exists. + * + * @param target The name of the property to get. + * + * @return Whether the target property exists. + */ + bool hasProperty(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + return true; + } + } + return false; + } + + /** + * @brief Check if a property exists with the requested type. 
+ * + * @tparam T The type of the property + * @param target The name of the property to get. + * + * @return Whether the target property exists. + */ + template + bool hasPropertyType(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + TypedProperty* castedProp = dynamic_cast*>(prop.get()); + if (castedProp) { + return true; + } + return false; + } + } + return false; + } + + /** + * @brief A list of the names of all properties + * + * @return Property names + */ + std::vector getPropertyNames() { + std::vector names; + for (std::unique_ptr& p : properties) { + names.push_back(p->name); + } + return names; + } + + /** + * @brief Low-level method to get a pointer to a property. Users probably don't need to call this. + * + * @param target The name of the property to get. + * + * @return A (unique_ptr) pointer to the property. + */ + std::unique_ptr& getPropertyPtr(const std::string& target) { + for (std::unique_ptr& prop : properties) { + if (prop->name == target) { + return prop; + } + } + throw std::runtime_error("PLY parser: element " + name + " does not have property " + target); + } + + /** + * @brief Add a new (plain, not list) property for this element type. + * + * @tparam T The type of the property + * @param propertyName The name of the property + * @param data The data for the property. Must have the same length as the number of elements. + */ + template + void addProperty(const std::string& propertyName, const std::vector& data) { + + if (data.size() != count) { + throw std::runtime_error("PLY write: new property " + propertyName + " has size which does not match element"); + } + + // If there is already some property with this name, remove it + for (size_t i = 0; i < properties.size(); i++) { + if (properties[i]->name == propertyName) { + properties.erase(properties.begin() + i); + i--; + } + } + + // Copy to canonical type. Often a no-op, but takes care of standardizing widths across platforms. 
+ std::vector::type> canonicalVec(data.begin(), data.end()); + + properties.push_back( + std::unique_ptr(new TypedProperty::type>(propertyName, canonicalVec))); + } + + /** + * @brief Add a new list property for this element type. + * + * @tparam T The type of the property (eg, "double" for a list of doubles) + * @param propertyName The name of the property + * @param data The data for the property. Outer vector must have the same length as the number of elements. + */ + template + void addListProperty(const std::string& propertyName, const std::vector>& data) { + + if (data.size() != count) { + throw std::runtime_error("PLY write: new property " + propertyName + " has size which does not match element"); + } + + // If there is already some property with this name, remove it + for (size_t i = 0; i < properties.size(); i++) { + if (properties[i]->name == propertyName) { + properties.erase(properties.begin() + i); + i--; + } + } + + // Copy to canonical type. Often a no-op, but takes care of standardizing widths across platforms. + std::vector::type>> canonicalListVec; + for (const std::vector& subList : data) { + canonicalListVec.emplace_back(subList.begin(), subList.end()); + } + + properties.push_back(std::unique_ptr( + new TypedListProperty::type>(propertyName, canonicalListVec))); + } + + /** + * @brief Get a vector of a data from a property for this element. Automatically promotes to larger types. Throws if + * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector getProperty(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + return getDataFromPropertyRecursive(prop.get()); + } + + /** + * @brief Get a vector of a data from a property for this element. 
Unlike getProperty(), only returns if the ply + * record contains a type that matches T exactly. Throws if * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector getPropertyType(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + TypedProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + return castedProp->data; + } + + // No match, failure + throw std::runtime_error("PLY parser: property " + prop->name + " is not of type type " + typeName() + + ". Has type " + prop->propertyTypeName()); + } + + /** + * @brief Get a vector of lists of data from a property for this element. Automatically promotes to larger types. + * Throws if requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. + */ + template + std::vector> getListProperty(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + return getDataFromListPropertyRecursive(prop.get()); + } + + /** + * @brief Get a vector of a data from a property for this element. Unlike getProperty(), only returns if the ply + * record contains a type that matches T exactly. Throws if * requested data is unavailable. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. 
+ */ + template + std::vector> getListPropertyType(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + TypedListProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + return unflattenList(castedProp->flattenedData, castedProp->flattenedIndexStart); + } + + // No match, failure + throw std::runtime_error("PLY parser: list property " + prop->name + " is not of type " + typeName() + + ". Has type " + prop->propertyTypeName()); + } + + + /** + * @brief Get a vector of lists of data from a property for this element. Automatically promotes to larger types. + * Unlike getListProperty(), this method will additionally convert between types of different sign (eg, requesting and + * int32 would get data from a uint32); doing so naively converts between signed and unsigned types. This is typically + * useful for data representing indices, which might be stored as signed or unsigned numbers. + * + * @tparam T The type of data requested + * @param propertyName The name of the property to get. + * + * @return The data. 
+ */ + template + std::vector> getListPropertyAnySign(const std::string& propertyName) { + + // Find the property + std::unique_ptr& prop = getPropertyPtr(propertyName); + + // Get a copy of the data with auto-promoting type magic + try { + // First, try the usual approach, looking for a version of the property with the same signed-ness and possibly + // smaller size + return getDataFromListPropertyRecursive(prop.get()); + } catch (const std::runtime_error& orig_e) { + + // If the usual approach fails, look for a version with opposite signed-ness + try { + + // This type has the oppopsite signeness as the input type + typedef typename CanonicalName::type Tcan; + typedef typename std::conditional::value, typename std::make_unsigned::type, + typename std::make_signed::type>::type OppsignType; + + return getDataFromListPropertyRecursive(prop.get()); + + } catch (const std::runtime_error&) { + throw orig_e; + } + + throw orig_e; + } + } + + + /** + * @brief Performs sanity checks on the element, throwing if any fail. + */ + void validate() { + + // Make sure no properties have duplicate names, and no names have whitespace + for (size_t iP = 0; iP < properties.size(); iP++) { + for (char c : properties[iP]->name) { + if (std::isspace(c)) { + throw std::runtime_error("Ply validate: illegal whitespace in name " + properties[iP]->name); + } + } + for (size_t jP = iP + 1; jP < properties.size(); jP++) { + if (properties[iP]->name == properties[jP]->name) { + throw std::runtime_error("Ply validate: multiple properties with name " + properties[iP]->name); + } + } + } + + // Make sure all properties have right length + for (size_t iP = 0; iP < properties.size(); iP++) { + if (properties[iP]->size() != count) { + throw std::runtime_error("Ply validate: property has wrong size. " + properties[iP]->name + + " does not match element size."); + } + } + } + + /** + * @brief Writes out this element's information to the file header. + * + * @param outStream The stream to use. 
+ */ + void writeHeader(std::ostream& outStream) { + + outStream << "element " << name << " " << count << "\n"; + + for (std::unique_ptr& p : properties) { + p->writeHeader(outStream); + } + } + + /** + * @brief (ASCII writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataASCII(std::ostream& outStream) { + // Question: what is the proper output for an element with no properties? Here, we write a blank line, so there is + // one line per element no matter what. + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataASCII(outStream, iE); + if (iP < properties.size() - 1) { + outStream << " "; + } + } + outStream << "\n"; + } + } + + + /** + * @brief (binary writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataBinary(std::ostream& outStream) { + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataBinary(outStream, iE); + } + } + } + + + /** + * @brief (binary writing) Writes out all of the data for every element of this element type to the stream, including + * all contained properties. + * + * @param outStream The stream to write to. + */ + void writeDataBinaryBigEndian(std::ostream& outStream) { + for (size_t iE = 0; iE < count; iE++) { + for (size_t iP = 0; iP < properties.size(); iP++) { + properties[iP]->writeDataBinaryBigEndian(outStream, iE); + } + } + } + + + /** + * @brief Helper function which does the hard work to implement type promotion for data getters. Throws if type + * conversion fails. 
+ * + * @tparam D The desired output type + * @tparam T The current attempt for the actual type of the property + * @param prop The property to get (does not delete nor share pointer) + * + * @return The data, with the requested type + */ + template + std::vector getDataFromPropertyRecursive(Property* prop) { + + typedef typename CanonicalName::type Tcan; + + { // Try to return data of type D from a property of type T + TypedProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + // Succeeded, return a buffer of the data (copy while converting type) + std::vector castedVec; + castedVec.reserve(castedProp->data.size()); + for (Tcan& v : castedProp->data) { + castedVec.push_back(static_cast(v)); + } + return castedVec; + } + } + + TypeChain chainType; + if (chainType.hasChildType) { + return getDataFromPropertyRecursive::type>(prop); + } else { + // No smaller type to try, failure + throw std::runtime_error("PLY parser: property " + prop->name + " cannot be coerced to requested type " + + typeName() + ". Has type " + prop->propertyTypeName()); + } + } + + + /** + * @brief Helper function which does the hard work to implement type promotion for list data getters. Throws if type + * conversion fails. 
+ * + * @tparam D The desired output type + * @tparam T The current attempt for the actual type of the property + * @param prop The property to get (does not delete nor share pointer) + * + * @return The data, with the requested type + */ + template + std::vector> getDataFromListPropertyRecursive(Property* prop) { + typedef typename CanonicalName::type Tcan; + + TypedListProperty* castedProp = dynamic_cast*>(prop); + if (castedProp) { + // Succeeded, return a buffer of the data (copy while converting type) + + // Convert to flat buffer of new type + std::vector* castedFlatVec = nullptr; + std::vector castedFlatVecCopy; // we _might_ make a copy here, depending on is_same below + + if (std::is_same, std::vector>::value) { + // just use the array we already have + castedFlatVec = addressIfSame>(castedProp->flattenedData, 0 /* dummy arg to disambiguate */); + } else { + // make a copy + castedFlatVecCopy.reserve(castedProp->flattenedData.size()); + for (Tcan& v : castedProp->flattenedData) { + castedFlatVecCopy.push_back(static_cast(v)); + } + castedFlatVec = &castedFlatVecCopy; + } + + // Unflatten and return + return unflattenList(*castedFlatVec, castedProp->flattenedIndexStart); + } + + TypeChain chainType; + if (chainType.hasChildType) { + return getDataFromListPropertyRecursive::type>(prop); + } else { + // No smaller type to try, failure + throw std::runtime_error("PLY parser: list property " + prop->name + + " cannot be coerced to requested type list " + typeName() + ". 
Has type list " + + prop->propertyTypeName()); + } + } +}; + + +// Some string helpers +namespace { + +inline std::string trimSpaces(const std::string& input) { + size_t start = 0; + while (start < input.size() && input[start] == ' ') start++; + size_t end = input.size(); + while (end > start && (input[end - 1] == ' ' || input[end - 1] == '\n' || input[end - 1] == '\r')) end--; + return input.substr(start, end - start); +} + +inline std::vector tokenSplit(const std::string& input) { + std::vector result; + size_t curr = 0; + size_t found = 0; + while ((found = input.find_first_of(' ', curr)) != std::string::npos) { + std::string token = input.substr(curr, found - curr); + token = trimSpaces(token); + if (token.size() > 0) { + result.push_back(token); + } + curr = found + 1; + } + std::string token = input.substr(curr); + token = trimSpaces(token); + if (token.size() > 0) { + result.push_back(token); + } + + return result; +} + +inline bool startsWith(const std::string& input, const std::string& query) { + return input.compare(0, query.length(), query) == 0; +} +}; // namespace + + +/** + * @brief Primary class; represents a set of data in the .ply format. + */ +class PLYData { + +public: + /** + * @brief Create an empty PLYData object. + */ + PLYData(){}; + + /** + * @brief Initialize a PLYData by reading from a file. Throws if any failures occur. + * + * @param filename The file to read from. + * @param verbose If true, print useful info about the file to stdout + */ + PLYData(const std::string& filename, bool verbose = false) { + + using std::cout; + using std::endl; + using std::string; + using std::vector; + + if (verbose) cout << "PLY parser: Reading ply file: " << filename << endl; + + // Open a file in binary always, in case it turns out to have binary data. 
+ std::ifstream inStream(filename, std::ios::binary); + if (inStream.fail()) { + throw std::runtime_error("PLY parser: Could not open file " + filename); + } + + parsePLY(inStream, verbose); + + if (verbose) { + cout << " - Finished parsing file." << endl; + } + } + + /** + * @brief Initialize a PLYData by reading from a stringstream. Throws if any failures occur. + * + * @param inStream The stringstream to read from. + * @param verbose If true, print useful info about the file to stdout + */ + PLYData(std::istream& inStream, bool verbose = false) { + + using std::cout; + using std::endl; + + if (verbose) cout << "PLY parser: Reading ply file from stream" << endl; + + parsePLY(inStream, verbose); + + if (verbose) { + cout << " - Finished parsing stream." << endl; + } + } + + /** + * @brief Perform sanity checks on the file, throwing if any fail. + */ + void validate() { + + for (size_t iE = 0; iE < elements.size(); iE++) { + for (char c : elements[iE].name) { + if (std::isspace(c)) { + throw std::runtime_error("Ply validate: illegal whitespace in element name " + elements[iE].name); + } + } + for (size_t jE = iE + 1; jE < elements.size(); jE++) { + if (elements[iE].name == elements[jE].name) { + throw std::runtime_error("Ply validate: duplcate element name " + elements[iE].name); + } + } + } + + // Do a quick validation sanity check + for (Element& e : elements) { + e.validate(); + } + } + + /** + * @brief Write this data to a .ply file. + * + * @param filename The file to write to. + * @param format The format to use (binary or ascii?) 
+ */ + void write(const std::string& filename, DataFormat format = DataFormat::ASCII) { + outputDataFormat = format; + + validate(); + + // Open stream for writing + std::ofstream outStream(filename, std::ios::out | std::ios::binary); + if (!outStream.good()) { + throw std::runtime_error("Ply writer: Could not open output file " + filename + " for writing"); + } + + writePLY(outStream); + } + + /** + * @brief Write this data to an output stream + * + * @param outStream The output stream to write to. + * @param format The format to use (binary or ascii?) + */ + void write(std::ostream& outStream, DataFormat format = DataFormat::ASCII) { + outputDataFormat = format; + + validate(); + + writePLY(outStream); + } + + /** + * @brief Get an element type by name ("vertices") + * + * @param target The name of the element type to get + * + * @return A reference to the element type. + */ + Element& getElement(const std::string& target) { + for (Element& e : elements) { + if (e.name == target) return e; + } + throw std::runtime_error("PLY parser: no element with name: " + target); + } + + + /** + * @brief Check if an element type exists + * + * @param target The name to check for. + * + * @return True if exists. + */ + bool hasElement(const std::string& target) { + for (Element& e : elements) { + if (e.name == target) return true; + } + return false; + } + + + /** + * @brief A list of the names of all elements + * + * @return Element names + */ + std::vector getElementNames() { + std::vector names; + for (Element& e : elements) { + names.push_back(e.name); + } + return names; + } + + + /** + * @brief Add a new element type to the object + * + * @param name The name of the new element type ("vertices"). + * @param count The number of elements of this type. 
+ */ + void addElement(const std::string& name, size_t count) { elements.emplace_back(name, count); } + + // === Common-case helpers + + + /** + * @brief Common-case helper get mesh vertex positions + * + * @param vertexElementName The element name to use (default: "vertex") + * + * @return A vector of vertex positions. + */ + std::vector> getVertexPositions(const std::string& vertexElementName = "vertex") { + + std::vector xPos = getElement(vertexElementName).getProperty("x"); + std::vector yPos = getElement(vertexElementName).getProperty("y"); + std::vector zPos = getElement(vertexElementName).getProperty("z"); + + std::vector> result(xPos.size()); + for (size_t i = 0; i < result.size(); i++) { + result[i][0] = xPos[i]; + result[i][1] = yPos[i]; + result[i][2] = zPos[i]; + } + + return result; + } + + /** + * @brief Common-case helper get mesh vertex colors + * + * @param vertexElementName The element name to use (default: "vertex") + * + * @return A vector of vertex colors (unsigned chars [0,255]). + */ + std::vector> getVertexColors(const std::string& vertexElementName = "vertex") { + + std::vector r = getElement(vertexElementName).getProperty("red"); + std::vector g = getElement(vertexElementName).getProperty("green"); + std::vector b = getElement(vertexElementName).getProperty("blue"); + + std::vector> result(r.size()); + for (size_t i = 0; i < result.size(); i++) { + result[i][0] = r[i]; + result[i][1] = g[i]; + result[i][2] = b[i]; + } + + return result; + } + + /** + * @brief Common-case helper to get face indices for a mesh. If not template type is given, size_t is used. Naively + * converts to requested signedness, which may lead to unexpected values if an unsigned type is used and file contains + * negative values. + * + * @return The indices into the vertex elements for each face. Usually 0-based, though there are no formal rules. 
+ */ + template + std::vector> getFaceIndices() { + + for (const std::string& f : std::vector{"face"}) { + for (const std::string& p : std::vector{"vertex_indices", "vertex_index"}) { + try { + return getElement(f).getListPropertyAnySign(p); + } catch (const std::runtime_error&) { + // that's fine + } + } + } + throw std::runtime_error("PLY parser: could not find face vertex indices attribute under any common name."); + } + + + /** + * @brief Common-case helper set mesh vertex positons. Creates vertex element, if necessary. + * + * @param vertexPositions A vector of vertex positions + */ + void addVertexPositions(std::vector>& vertexPositions) { + + std::string vertexName = "vertex"; + size_t N = vertexPositions.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + // De-interleave + std::vector xPos(N); + std::vector yPos(N); + std::vector zPos(N); + for (size_t i = 0; i < vertexPositions.size(); i++) { + xPos[i] = vertexPositions[i][0]; + yPos[i] = vertexPositions[i][1]; + zPos[i] = vertexPositions[i][2]; + } + + // Store + getElement(vertexName).addProperty("x", xPos); + getElement(vertexName).addProperty("y", yPos); + getElement(vertexName).addProperty("z", zPos); + } + + /** + * @brief Common-case helper set mesh vertex colors. Creates a vertex element, if necessary. + * + * @param colors A vector of vertex colors (unsigned chars [0,255]). 
+ */ + void addVertexColors(std::vector>& colors) { + + std::string vertexName = "vertex"; + size_t N = colors.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + // De-interleave + std::vector r(N); + std::vector g(N); + std::vector b(N); + for (size_t i = 0; i < colors.size(); i++) { + r[i] = colors[i][0]; + g[i] = colors[i][1]; + b[i] = colors[i][2]; + } + + // Store + getElement(vertexName).addProperty("red", r); + getElement(vertexName).addProperty("green", g); + getElement(vertexName).addProperty("blue", b); + } + + /** + * @brief Common-case helper set mesh vertex colors. Creates a vertex element, if necessary. + * + * @param colors A vector of vertex colors as floating point [0,1] values. Internally converted to [0,255] chars. + */ + void addVertexColors(std::vector>& colors) { + + std::string vertexName = "vertex"; + size_t N = colors.size(); + + // Create the element + if (!hasElement(vertexName)) { + addElement(vertexName, N); + } + + auto toChar = [](double v) { + if (v < 0.0) v = 0.0; + if (v > 1.0) v = 1.0; + return static_cast(v * 255.); + }; + + // De-interleave + std::vector r(N); + std::vector g(N); + std::vector b(N); + for (size_t i = 0; i < colors.size(); i++) { + r[i] = toChar(colors[i][0]); + g[i] = toChar(colors[i][1]); + b[i] = toChar(colors[i][2]); + } + + // Store + getElement(vertexName).addProperty("red", r); + getElement(vertexName).addProperty("green", g); + getElement(vertexName).addProperty("blue", b); + } + + + /** + * @brief Common-case helper to set face indices. Creates a face element if needed. The input type will be casted to a + * 32 bit integer of the same signedness. + * + * @param indices The indices into the vertex list around each face. 
+ */ + template + void addFaceIndices(std::vector>& indices) { + + std::string faceName = "face"; + size_t N = indices.size(); + + // Create the element + if (!hasElement(faceName)) { + addElement(faceName, N); + } + + // Cast to 32 bit + typedef typename std::conditional::value, int32_t, uint32_t>::type IndType; + std::vector> intInds; + for (std::vector& l : indices) { + std::vector thisInds; + for (T& val : l) { + IndType valConverted = static_cast(val); + if (valConverted != val) { + throw std::runtime_error("Index value " + std::to_string(val) + + " could not be converted to a .ply integer without loss of data. Note that .ply " + "only supports 32-bit ints."); + } + thisInds.push_back(valConverted); + } + intInds.push_back(thisInds); + } + + // Store + getElement(faceName).addListProperty("vertex_indices", intInds); + } + + + /** + * @brief Comments for the file. When writing, each entry will be written as a sequential comment line. + */ + std::vector comments; + + + /** + * @brief obj_info comments for the file. When writing, each entry will be written as a sequential comment line. 
+ */ + std::vector objInfoComments; + +private: + std::vector elements; + const int majorVersion = 1; // I'll buy you a drink if these ever get bumped + const int minorVersion = 0; + + DataFormat inputDataFormat = DataFormat::ASCII; // set when reading from a file + DataFormat outputDataFormat = DataFormat::ASCII; // option for writing files + + + // === Reading === + + /** + * @brief Parse a PLY file from an input stream + * + * @param inStream + * @param verbose + */ + void parsePLY(std::istream& inStream, bool verbose) { + + // == Process the header + parseHeader(inStream, verbose); + + + // === Parse data from a binary file + if (inputDataFormat == DataFormat::Binary) { + parseBinary(inStream, verbose); + } + // === Parse data from an binary file + else if (inputDataFormat == DataFormat::BinaryBigEndian) { + parseBinaryBigEndian(inStream, verbose); + } + // === Parse data from an ASCII file + else if (inputDataFormat == DataFormat::ASCII) { + parseASCII(inStream, verbose); + } + } + + /** + * @brief Read the header for a file + * + * @param inStream + * @param verbose + */ + void parseHeader(std::istream& inStream, bool verbose) { + + using std::cout; + using std::endl; + using std::string; + using std::vector; + + // First two lines are predetermined + { // First line is magic constant + string plyLine; + std::getline(inStream, plyLine); + if (trimSpaces(plyLine) != "ply") { + throw std::runtime_error("PLY parser: File does not appear to be ply file. 
First line should be 'ply'"); + } + } + + { // second line is version + string styleLine; + std::getline(inStream, styleLine); + vector tokens = tokenSplit(styleLine); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: bad format line"); + std::string formatStr = tokens[0]; + std::string typeStr = tokens[1]; + std::string versionStr = tokens[2]; + + // "format" + if (formatStr != "format") throw std::runtime_error("PLY parser: bad format line"); + + // ascii/binary + if (typeStr == "ascii") { + inputDataFormat = DataFormat::ASCII; + if (verbose) cout << " - Type: ascii" << endl; + } else if (typeStr == "binary_little_endian") { + inputDataFormat = DataFormat::Binary; + if (verbose) cout << " - Type: binary" << endl; + } else if (typeStr == "binary_big_endian") { + inputDataFormat = DataFormat::BinaryBigEndian; + if (verbose) cout << " - Type: binary big endian" << endl; + } else { + throw std::runtime_error("PLY parser: bad format line"); + } + + // version + if (versionStr != "1.0") { + throw std::runtime_error("PLY parser: encountered file with version != 1.0. 
Don't know how to parse that"); + } + if (verbose) cout << " - Version: " << versionStr << endl; + } + + // Consume header line by line + while (inStream.good()) { + string line; + std::getline(inStream, line); + + // Parse a comment + if (startsWith(line, "comment")) { + string comment = line.substr(8); + if (verbose) cout << " - Comment: " << comment << endl; + comments.push_back(comment); + continue; + } + + // Parse an obj_info comment + if (startsWith(line, "obj_info")) { + string infoComment = line.substr(9); + if (verbose) cout << " - obj_info: " << infoComment << endl; + objInfoComments.push_back(infoComment); + continue; + } + + // Parse an element + else if (startsWith(line, "element")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: Invalid element line"); + string name = tokens[1]; + size_t count; + std::istringstream iss(tokens[2]); + iss >> count; + elements.emplace_back(name, count); + if (verbose) cout << " - Found element: " << name << " (count = " << count << ")" << endl; + continue; + } + + // Parse a property list + else if (startsWith(line, "property list")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 5) throw std::runtime_error("PLY parser: Invalid property list line"); + if (elements.size() == 0) throw std::runtime_error("PLY parser: Found property list without previous element"); + string countType = tokens[2]; + string type = tokens[3]; + string name = tokens[4]; + elements.back().properties.push_back(createPropertyWithType(name, type, true, countType)); + if (verbose) + cout << " - Found list property: " << name << " (count type = " << countType << ", data type = " << type + << ")" << endl; + continue; + } + + // Parse a property + else if (startsWith(line, "property")) { + vector tokens = tokenSplit(line); + if (tokens.size() != 3) throw std::runtime_error("PLY parser: Invalid property line"); + if (elements.size() == 0) throw std::runtime_error("PLY parser: Found 
property without previous element"); + string type = tokens[1]; + string name = tokens[2]; + elements.back().properties.push_back(createPropertyWithType(name, type, false, "")); + if (verbose) cout << " - Found property: " << name << " (type = " << type << ")" << endl; + continue; + } + + // Parse end of header + else if (startsWith(line, "end_header")) { + break; + } + + // Error! + else { + throw std::runtime_error("Unrecognized header line: " + line); + } + } + } + + /** + * @brief Read the actual data for a file, in ASCII + * + * @param inStream + * @param verbose + */ + void parseASCII(std::istream& inStream, bool verbose) { + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + + string line; + std::getline(inStream, line); + + // Some .ply files seem to include empty lines before the start of property data (though this is not specified + // in the format description). We attempt to recover and parse such files by skipping any empty lines. + if (!elem.properties.empty()) { // if the element has no properties, the line _should_ be blank, presumably + while (line.empty()) { // skip lines until we hit something nonempty + std::getline(inStream, line); + } + } + + vector tokens = tokenSplit(line); + size_t iTok = 0; + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->parseNext(tokens, iTok); + } + } + } + } + + /** + * @brief Read the actual data for a file, in binary. 
+ * + * @param inStream + * @param verbose + */ + void parseBinary(std::istream& inStream, bool verbose) { + + if (!isLittleEndian()) { + throw std::runtime_error("binary reading assumes little endian system"); + } + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->readNext(inStream); + } + } + } + } + + /** + * @brief Read the actual data for a file, in binary. + * + * @param inStream + * @param verbose + */ + void parseBinaryBigEndian(std::istream& inStream, bool verbose) { + + if (!isLittleEndian()) { + throw std::runtime_error("binary reading assumes little endian system"); + } + + using std::string; + using std::vector; + + // Read all elements + for (Element& elem : elements) { + + if (verbose) { + std::cout << " - Processing element: " << elem.name << std::endl; + } + + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->reserve(elem.count); + } + for (size_t iEntry = 0; iEntry < elem.count; iEntry++) { + for (size_t iP = 0; iP < elem.properties.size(); iP++) { + elem.properties[iP]->readNextBigEndian(inStream); + } + } + } + } + + // === Writing === + + + /** + * @brief write a PLY file to an output stream + * + * @param outStream + */ + void writePLY(std::ostream& outStream) { + + writeHeader(outStream); + + // Write all elements + for (Element& e : elements) { + if (outputDataFormat == DataFormat::Binary) { + if (!isLittleEndian()) { + throw std::runtime_error("binary writing assumes little endian system"); + } + e.writeDataBinary(outStream); + } else if (outputDataFormat == DataFormat::BinaryBigEndian) { + if (!isLittleEndian()) { + throw 
std::runtime_error("binary writing assumes little endian system"); + } + e.writeDataBinaryBigEndian(outStream); + } else if (outputDataFormat == DataFormat::ASCII) { + e.writeDataASCII(outStream); + } + } + } + + + /** + * @brief Write out a header for a file + * + * @param outStream + */ + void writeHeader(std::ostream& outStream) { + + // Magic line + outStream << "ply\n"; + + // Type line + outStream << "format "; + if (outputDataFormat == DataFormat::Binary) { + outStream << "binary_little_endian "; + } else if (outputDataFormat == DataFormat::BinaryBigEndian) { + outStream << "binary_big_endian "; + } else if (outputDataFormat == DataFormat::ASCII) { + outStream << "ascii "; + } + + // Version number + outStream << majorVersion << "." << minorVersion << "\n"; + + // Write comments + bool hasHapplyComment = false; + std::string happlyComment = "Written with hapPLY (https://github.com/nmwsharp/happly)"; + for (const std::string& comment : comments) { + if (comment == happlyComment) hasHapplyComment = true; + outStream << "comment " << comment << "\n"; + } + if (!hasHapplyComment) { + outStream << "comment " << happlyComment << "\n"; + } + + // Write obj_info comments + for (const std::string& comment : objInfoComments) { + outStream << "obj_info " << comment << "\n"; + } + + // Write elements (and their properties) + for (Element& e : elements) { + e.writeHeader(outStream); + } + + // End header + outStream << "end_header\n"; + } +}; + +} // namespace happly diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 929f4302..84dfa173 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -20,7 +20,7 @@ The following environmental variables should be set up (please adjust them for your particular configuration): export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/astrobee/build + export ASTROBEE_BUILD_PATH=$HOME/astrobee export ISAAC_WS=$HOME/isaac ## Robot 
sensors @@ -110,7 +110,8 @@ To compile Voxblox, clone https://github.com/ethz-asl/voxblox by the introduction of a small tool named batch_tsdf.cc that reads the clouds to fuse and the transforms from disk and writes the output mesh back to disk, instead -of using ROS. +of using ROS. It also can take into account a point's reliability when +fusing the clouds, which is computed by the geometry mapper. Compile it using the instructions at: @@ -286,17 +287,17 @@ robots using some established infrastructure. Alternatively, one can use `adb pull`. After this tool is used, the data can be manually deleted from HLP by first connecting to it with `adb shell`.) -In order to perform calibration, the sci cam data in the bag needs to +In order to run camera_calibrator, the sci cam data in the bag needs to be decompressed, resized to 1/4 of the resolution, and made to be grayscale. This is needed since the sci cam images are a little blurry and, at full resolution, the corners of the calibration target are hard to detect. The calibrator also does not handle the color format in the bag, hence the switch to grayscale images. -However, the both the geometry and the streaming mapper can handle -both color and grayscale images, and both at reduced resolution and -full-resolution. These tools can adjust for the fact that the -calibration was done at reduced resolution. +However, both the geometry and the streaming mapper, as well as +camera_refiner, can handle both color and grayscale images, and both +at reduced resolution and full-resolution. These tools can adjust for +the fact that the calibration was done at reduced resolution. To accomplish this processing, once the sci cam data is integrated into the bag, one can do the following: @@ -560,6 +561,8 @@ Nav cam images can be extracted from a bag as follows: -use_timestamp_as_image_name The last option, `-use_timestamp_as_image_name`, must not be missed. 
+It makes it easy to look up the image acquisition based on image name, +and this is used by the geometry mapper. Note that bags acquired on the ISS usually have the nav cam image topic as: @@ -645,19 +648,21 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, --camera_type all \ --start 0 \ --duration 1e+10 \ - --sampling_spacing_seconds 2 \ + --sampling_spacing_seconds 5 \ --dist_between_processed_cams 0.1 \ --depth_exclude_columns 0 \ --depth_exclude_rows 0 \ --foreshortening_delta 5.0 \ --median_filters "7 0.1 25 0.1" \ + --reliability_weight_exponent 2 \ + --voxblox_integrator merged \ --depth_hole_fill_diameter 30.0 \ - --max_ray_length 2 \ + --max_ray_length 2.5 \ --voxel_size 0.01 \ --max_iso_times_exposure 5.1 \ - --smoothing_time 1e-5 \ - --max_num_hole_edges 4000 \ - --max_hole_diameter 0.8 \ + --smoothing_time 5e-6 \ + --max_num_hole_edges 8000 \ + --max_hole_diameter 1.8 \ --num_min_faces_in_component 100 \ --num_components_to_keep 100 \ --edge_keep_ratio 0.2 \ @@ -703,9 +708,16 @@ Parameters: are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (see --max_hole_diameter). The default is 30. + --reliability_weight_exponent: A larger value will give more + weight to depth points corresponding to pixels closer to depth + image center, which are considered more reliable. The default is + 2.0. --max_ray_length_m: Dictates at what distance from the depth camera to truncate the rays (in meters). This can be small if the images are acquired close to walls and facing them. + --voxblox_integrator: When fusing the depth point clouds use + this VoxBlox method. Options are: "merged", "simple", and + "fast". The default is "merged". --voxel_size is the grid size used for binning the points, in meters. 
--max_iso_times_exposure: Apply the inverse gamma transform to images, multiply them by max_iso_times_exposure/ISO/exposure_time @@ -1074,12 +1086,14 @@ sci cam image using the tool: export ASTROBEE_ROBOT=bsharp2 source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash - $ISAAC_WS/devel/lib/geometry_mapper/image_picker \ - --ros_bag mybag.bag \ - --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + $ISAAC_WS/devel/lib/geometry_mapper/image_picker \ + --ros_bag mybag.bag \ + --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ + --sci_cam_topic /hw/cam_sci/compressed \ + --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \ --bracket_len 2.0 --output_nav_cam_dir nav_images -Setting up the correct bot name is very important. +Setting up the correct robot name above is very important. The --bracket_len option brackets sci cam images by nav cam images so the length of time between these two nav cam images is at most this @@ -1121,7 +1135,7 @@ are added back. ### Map building and registration -Build a sparse map with these images. Use the same environemnt as +Build a sparse map with these images. Use the same environment as above: dir=nav_images @@ -1177,6 +1191,7 @@ and hence keeping its registration, as: $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/merge_maps \ --fix_first_map \ --num_image_overlaps_at_endpoints 200 \ + --min_valid_angle 1.0 \ registered_submap.map $surf_map \ --output_map merged.map @@ -1210,7 +1225,7 @@ point to that. --fix_map --skip_registration --float_scale \ --timestamp_interpolation --robust_threshold 3 \ --sci_cam_intrinsics_to_float \ - 'focal_length optical center distortion' \ + 'focal_length optical_center distortion' \ --mesh mesh.ply --mesh_weight 25 \ --mesh_robust_threshold 3 @@ -1292,7 +1307,7 @@ This program's options are: --sci_cam_intrinsics_to_float: Refine 0 or more of the following intrinsics for sci_cam: focal_length, optical_center, distortion. Specify as a quoted list. 
For example: - 'focal_length optical center distortion'. + 'focal_length optical_center distortion'. --timestamp_interpolation: If true, interpolate between timestamps. May give better results if the robot is known to move uniformly, and perhaps less so for stop-and-go robot motion. @@ -1333,7 +1348,7 @@ A geometry mapper run directory has all the inputs this tool needs. It can be run as follows: export ASTROBEE_SOURCE_PATH=$HOME/projects/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee/build + export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee export ISAAC_WS=$HOME/projects/isaac source $ASTROBEE_BUILD_PATH/devel/setup.bash source $ISAAC_WS/devel/setup.bash diff --git a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc index e9c31d98..23e74a3e 100644 --- a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc @@ -118,6 +118,53 @@ void readBagImageTimestamps(std::string const& bag_file, std::string const& topi } } +// Given a bag view, for each topic in the view read the vector of +// messages for that topic, sorted by message header timestamp. Only +// the following sensor types are supported: sensor_msgs::Image, +// sensor_msgs::CompressedImage, and sensor_msgs::PointCloud2. 
+void indexMessages(rosbag::View& view, // view can't be made const + std::map>& bag_map) { + bag_map.clear(); + + // First put the data in maps so that we can sort it + std::map> local_map; + for (rosbag::MessageInstance const m : view) { + // Check for regular image message + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + double curr_time = image_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + + // Check for compressed image message + sensor_msgs::CompressedImage::ConstPtr comp_image_msg = + m.instantiate(); + if (comp_image_msg) { + double curr_time = comp_image_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + + // Check for cloud + sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); + if (pc_msg) { + double curr_time = pc_msg->header.stamp.toSec(); + local_map[m.getTopic()].insert(std::make_pair(curr_time, m)); + continue; + } + } + + // Add the data in sorted order + for (auto topic_it = local_map.begin(); topic_it != local_map.end(); topic_it++) { + auto& topic_name = topic_it->first; // alias + auto& topic_map = topic_it->second; // alias + + for (auto msg_it = topic_map.begin(); msg_it != topic_map.end() ; msg_it++) + bag_map[topic_name].push_back(msg_it->second); + } +} + // Read exif data from a bag void readExifFromBag(std::vector const& bag_msgs, std::map >& exif) { @@ -142,8 +189,8 @@ void readExifFromBag(std::vector const& bag_msgs, // that during repeated calls to this function we always travel // forward in time, and we keep track of where we are in the bag using // the variable bag_pos that we update as we go. 
-bool lookupImage(double desired_time, std::vector const& bag_msgs, bool save_grayscale, - cv::Mat& image, int& bag_pos, double& found_time) { +bool lookupImage(double desired_time, std::vector const& bag_msgs, + bool save_grayscale, cv::Mat& image, int& bag_pos, double& found_time) { found_time = -1.0; // Record the time at which the image was found int num_msgs = bag_msgs.size(); double prev_image_time = -1.0; @@ -152,27 +199,35 @@ bool lookupImage(double desired_time, std::vector const bag_pos = local_pos; // save this for exporting // Check for uncompressed images - sensor_msgs::Image::ConstPtr image_msg = bag_msgs[local_pos].instantiate(); + sensor_msgs::Image::ConstPtr image_msg + = bag_msgs[local_pos].instantiate(); if (image_msg) { ros::Time stamp = image_msg->header.stamp; found_time = stamp.toSec(); // Sanity check: We must always travel forward in time - if (found_time < prev_image_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (found_time < prev_image_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_image_time << ' ' << found_time << ".\n"; + continue; + } prev_image_time = found_time; if (found_time >= desired_time) { try { if (!save_grayscale) { - image = cv_bridge::toCvShare(image_msg, "bgr8")->image; + // Do a copy, as image_msg may soon run out of scope + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); } else { - // for some reason, looking up the image as grayscale does not work, + // For some reason, looking up the image as grayscale does not work, // so first it needs to be looked up as color and then converted. 
cv::Mat tmp_image = cv_bridge::toCvShare(image_msg, "bgr8")->image; cv::cvtColor(tmp_image, image, cv::COLOR_BGR2GRAY); } } catch (cv_bridge::Exception const& e) { - ROS_ERROR_STREAM("Unable to convert " << image_msg->encoding.c_str() << " image to bgr8."); + ROS_ERROR_STREAM("Unable to convert " << image_msg->encoding.c_str() + << " image to bgr8."); return false; } return true; @@ -187,7 +242,12 @@ bool lookupImage(double desired_time, std::vector const found_time = stamp.toSec(); // Sanity check: We must always travel forward in time - if (found_time < prev_image_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (found_time < prev_image_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_image_time << ' ' << found_time << ".\n"; + continue; + } prev_image_time = found_time; if (found_time >= desired_time) { @@ -209,14 +269,14 @@ bool lookupImage(double desired_time, std::vector const return false; } -// Find the closest depth cloud to given timestamp. Return an empty -// one if one cannot be found closer in time than max_time_diff. -// Store it as a cv::Mat of vec3f values. We assume that during -// repeated calls to this function we always travel forward in time, -// and we keep track of where we are in the bag using the variable -// bag_pos that we update as we go. -bool lookupCloud(double desired_time, std::vector const& bag_msgs, double max_time_diff, - cv::Mat& cloud, int& bag_pos, double& found_time) { +// Find the closest depth cloud to given timestamp (before or after +// it). Return an empty one if one cannot be found closer in time than +// max_time_diff. Store it as a cv::Mat of vec3f values. We assume +// that during repeated calls to this function we always travel +// forward in time, and we keep track of where we are in the bag using +// the variable bag_pos that we update as we go. 
+bool lookupCloud(double desired_time, std::vector const& bag_msgs, + double max_time_diff, cv::Mat& cloud, int& bag_pos, double& found_time) { int num_msgs = bag_msgs.size(); double prev_time = -1.0; found_time = -1.0; @@ -227,12 +287,18 @@ bool lookupCloud(double desired_time, std::vector const for (int local_pos = bag_pos; local_pos < num_msgs; local_pos++) { // Check for cloud - sensor_msgs::PointCloud2::ConstPtr curr_pc_msg = bag_msgs[local_pos].instantiate(); + sensor_msgs::PointCloud2::ConstPtr curr_pc_msg + = bag_msgs[local_pos].instantiate(); if (!curr_pc_msg) continue; double curr_time = curr_pc_msg->header.stamp.toSec(); // Sanity check: We must always travel forward in time - if (curr_time < prev_time) LOG(FATAL) << "Time in the bag must be increasing."; + if (curr_time < prev_time) { + LOG(ERROR) << "Found images in a bag not in chronological order. Caution advised.\n" + << std::fixed << std::setprecision(17) + << "Times in wrong order: " << prev_time << ' ' << curr_time << ".\n"; + continue; + } // We are not yet at the stage where we can make decisions, so just keep on going if (curr_time <= desired_time) { @@ -261,14 +327,16 @@ bool lookupCloud(double desired_time, std::vector const // Decode the cloud pcl::PointCloud pc; pcl::fromROSMsg(*curr_pc_msg, pc); - if (static_cast(pc.points.size()) != static_cast(curr_pc_msg->width * curr_pc_msg->height)) + if (static_cast(pc.points.size()) != + static_cast(curr_pc_msg->width * curr_pc_msg->height)) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; cloud = cv::Mat::zeros(curr_pc_msg->height, curr_pc_msg->width, CV_32FC3); for (int row = 0; row < curr_pc_msg->height; row++) { for (int col = 0; col < curr_pc_msg->width; col++) { int count = row * curr_pc_msg->width + col; - cloud.at(row, col) = cv::Vec3f(pc.points[count].x, pc.points[count].y, pc.points[count].z); + cloud.at(row, col) + = cv::Vec3f(pc.points[count].x, pc.points[count].y, pc.points[count].z); } } diff --git 
a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index c330334d..29ff99d4 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -845,8 +845,10 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b } // Triangulate rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, + Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, + Eigen::Vector2d const& pix1, Eigen::Vector2d const& pix2) { Eigen::Matrix3d k1; k1 << focal_length1, 0, 0, 0, focal_length1, 0, 0, 0, 1; @@ -866,6 +868,34 @@ Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eige return solution; } +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec) { + if (focal_length_vec.size() != world_to_cam_vec.size() || + focal_length_vec.size() != pix_vec.size()) + LOG(FATAL) << "All inputs to Triangulate() must have the same size."; + + if (focal_length_vec.size() <= 1) + LOG(FATAL) << "At least two rays must be passed to Triangulate()."; + + openMVG::Triangulation tri; + + for (size_t it = 0; it < focal_length_vec.size(); it++) { + Eigen::Matrix3d k; + k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p; + openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), + &cid_to_p); + + tri.add(cid_to_p, pix_vec[it]); + } + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + // A debug utility for saving a camera in a format ASP 
understands. // Need to expose the sci cam intrinsics. void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, diff --git a/dense_map/geometry_mapper/src/interest_point.cc b/dense_map/geometry_mapper/src/interest_point.cc index 12cd4155..7cef6c1c 100644 --- a/dense_map/geometry_mapper/src/interest_point.cc +++ b/dense_map/geometry_mapper/src/interest_point.cc @@ -89,7 +89,8 @@ void detectFeatures(const cv::Mat& image, bool verbose, // This really likes haz cam first and nav cam second void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_image_index, cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, - Eigen::Matrix2Xd const& left_keypoints, Eigen::Matrix2Xd const& right_keypoints, bool verbose, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, bool verbose, // output MATCH_PAIR* matches) { std::vector cv_matches; @@ -108,17 +109,17 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag if (left_vec.empty()) return; - // These may need some tweaking but work reasonably well. + // These may need some tweaking but works reasonably well. 
double ransacReprojThreshold = 20.0; cv::Mat inlier_mask; int maxIters = 10000; double confidence = 0.8; + // affine2D works better than homography // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, - // ransacReprojThreshold, - // inlier_mask, maxIters, confidence); - cv::Mat H = - cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, ransacReprojThreshold, maxIters, confidence); + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); std::vector left_ip, right_ip; for (size_t j = 0; j < cv_matches.size(); j++) { @@ -140,10 +141,16 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag right_ip.push_back(right); } - if (verbose) std::cout << "Number of matches: " << left_ip.size() << std::endl; - // Update the shared variable using a lock match_mutex->lock(); + + // Print the verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; + *matches = std::make_pair(left_ip, right_ip); match_mutex->unlock(); } diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc index 0d375bfe..56dc9d9e 100644 --- a/dense_map/geometry_mapper/tools/camera_refiner.cc +++ b/dense_map/geometry_mapper/tools/camera_refiner.cc @@ -17,12 +17,9 @@ * under the License. */ -// TODO(oalexan1): Must not use haz cam frames outside the bracket. -// TODO(oalexan1): In the refiner, get a max bracket size. // TODO(oalexan1): Consider adding a haz cam to haz cam // reprojection error in the camera refiner. There will be more // haz to haz matches than haz to nav or haz to sci. 
- #include #include #include @@ -144,7 +141,7 @@ DEFINE_bool(float_scale, false, DEFINE_string(sci_cam_intrinsics_to_float, "", "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical center'."); + "For example: 'focal_length optical_center'."); DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, std::numeric_limits::quiet_NaN(), @@ -170,38 +167,8 @@ DEFINE_bool(verbose, false, namespace dense_map { - // TODO(oalexan1): Store separately matches which end up being - // squashed in a pid_cid_to_fid. - - // A class to encompass all known information about a camera - // This is work in progress and will replace some of the logic further down. - struct cameraImage { - // An index to look up the type of camera. This will equal - // the value ref_camera_type if and only if this is a reference camera. - int camera_type; - - // The timestamp for this camera (in floating point seconds since epoch) - double timestamp; - - // Indices to look up the left and right reference cameras bracketing this camera - // in time. The two indices will have same value if and only if this is a reference - // camera. - int left_ref_cam_index; - int right_ref_ca_index; - - // Indices to the reference camera timestamps bracketing this timestamp in time. - // The two indices will have the same value if and only if this is a reference - // camera. - int left_ref_timestamp_index; - int right_ref_timestamp_index; - - // Index used to look up the reference camera to given camera rigid transform. - // That transform is the identity if and only if this is a reference camera. - int ref_cam_to_cam_index; - - // The image for this camera, in grayscale - cv::Mat image; - }; +// TODO(oalexan1): Store separately matches which end up being +// squashed in a pid_cid_to_fid. 
ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { // Convert to lower-case @@ -230,7 +197,7 @@ struct DepthToHazError { camera::CameraParameters const& haz_cam_params) : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes), m_haz_cam_params(haz_cam_params) { // Sanity check. - if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALE_PARAMS) { + if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) { LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n"; } } @@ -371,7 +338,7 @@ struct DepthToNavError { // Sanity check. if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_SCALE_PARAMS) { + m_block_sizes[4] != NUM_SCALAR_PARAMS) { LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; } } @@ -475,7 +442,7 @@ struct DepthToSciError { // Sanity check. 
if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALE_PARAMS || + m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; @@ -800,27 +767,28 @@ void calc_median_residuals(std::vector const& residuals, } // Prevent the linter from messing up with the beautiful formatting below - void add_haz_nav_cost(// Inputs // NOLINT - int haz_it, int nav_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & haz_cam_intensity_timestamps, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::map const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map const & haz_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - std::vector const & depth_to_nav_block_sizes, // NOLINT - std::vector const & depth_to_haz_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & hazcam_depth_to_image_vec, // NOLINT - ceres::Problem & problem) { // NOLINT + void add_haz_nav_cost // NOLINT + (// Inputs // NOLINT + int haz_it, int nav_it, int nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, 
// NOLINT + std::vector const & haz_cam_intensity_timestamps, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::map const & haz_cam_to_left_nav_cam_index, // NOLINT + std::map const & haz_cam_to_right_nav_cam_index, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & haz_cam_params, // NOLINT + std::vector const & depth_to_nav_block_sizes, // NOLINT + std::vector const & depth_to_haz_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + // Outputs // NOLINT + std::vector & residual_names, // NOLINT + double & hazcam_depth_to_image_scale, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & hazcam_depth_to_image_vec, // NOLINT + ceres::Problem & problem) { // NOLINT // Figure out the two nav cam indices bounding the current haz cam // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset. @@ -917,7 +885,7 @@ void calc_median_residuals(std::vector const& residuals, residual_names.push_back("haznavhaz1"); residual_names.push_back("haznavhaz2"); - // Ensure that the the depth points projects well in the nav cam interest point + // Ensure that the depth points projects well in the nav cam interest point ceres::CostFunction* depth_to_nav_cost_function = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left, depth_to_nav_block_sizes, nav_cam_params); @@ -1079,7 +1047,7 @@ void calc_median_residuals(std::vector const& residuals, residual_names.push_back("hazscihaz1"); residual_names.push_back("hazscihaz2"); - // Ensure that the the depth points projects well in the sci cam interest point. + // Ensure that the depth points projects well in the sci cam interest point. 
// Note how we pass a distorted sci cam pix, as in that error function we will // take the difference of distorted pixels. ceres::CostFunction* depth_to_sci_cost_function @@ -1107,38 +1075,39 @@ void calc_median_residuals(std::vector const& residuals, } // Prevent the linter from messing up with the beautiful formatting below - void add_nav_sci_cost(// Inputs // NOLINT - int nav_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::vector const & sci_cam_timestamps, // NOLINT - std::map const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map const & sci_cam_to_right_nav_cam_index, // NOLINT - Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT - Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - std::vector const & nav_block_sizes, // NOLINT - std::vector const & sci_block_sizes, // NOLINT - std::vector const & mesh_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - mve::TriangleMesh::Ptr const & mesh, // NOLINT - std::shared_ptr const & bvh_tree, // NOLINT - // Outputs // NOLINT - int & nav_sci_xyz_count, // NOLINT - std::vector & residual_names, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & scicam_to_hazcam_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT - Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - std::vector & initial_nav_sci_xyz, // NOLINT - std::vector & nav_sci_xyz, // NOLINT - ceres::Problem & problem) { // NOLINT + void add_nav_sci_cost + (// Inputs // NOLINT + int nav_it, int sci_it, int 
nav_cam_start, // NOLINT + double navcam_to_hazcam_timestamp_offset, // NOLINT + double scicam_to_hazcam_timestamp_offset, // NOLINT + MATCH_PAIR const & match_pair, // NOLINT + std::vector const & sparse_map_timestamps, // NOLINT + std::vector const & sci_cam_timestamps, // NOLINT + std::map const & sci_cam_to_left_nav_cam_index, // NOLINT + std::map const & sci_cam_to_right_nav_cam_index, // NOLINT + Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT + Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT + camera::CameraParameters const & nav_cam_params, // NOLINT + camera::CameraParameters const & sci_cam_params, // NOLINT + std::vector const & nav_block_sizes, // NOLINT + std::vector const & sci_block_sizes, // NOLINT + std::vector const & mesh_block_sizes, // NOLINT + std::vector const & nav_cam_affines, // NOLINT + std::vector const & depth_clouds, // NOLINT + mve::TriangleMesh::Ptr const & mesh, // NOLINT + std::shared_ptr const & bvh_tree, // NOLINT + // Outputs // NOLINT + int & nav_sci_xyz_count, // NOLINT + std::vector & residual_names, // NOLINT + std::vector & nav_cam_vec, // NOLINT + std::vector & hazcam_to_navcam_vec, // NOLINT + std::vector & scicam_to_hazcam_vec, // NOLINT + Eigen::Vector2d & sci_cam_focal_vector, // NOLINT + Eigen::Vector2d & sci_cam_optical_center, // NOLINT + Eigen::VectorXd & sci_cam_distortion, // NOLINT + std::vector & initial_nav_sci_xyz, // NOLINT + std::vector & nav_sci_xyz, // NOLINT + ceres::Problem & problem) { // NOLINT auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it); auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it); if (left_it == sci_cam_to_left_nav_cam_index.end() || @@ -1295,8 +1264,33 @@ void calc_median_residuals(std::vector const& residuals, return; } - // TODO(oalexan1): This selects haz cam images outside of bracket. - // TODO(oalexan1): No check for bracket size either. 
+ void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. + cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; + } + void select_images_to_match(// Inputs // NOLINT double haz_cam_start_time, // NOLINT double navcam_to_hazcam_timestamp_offset, // NOLINT @@ -1332,8 +1326,8 @@ void calc_median_residuals(std::vector const& residuals, double navcam_to_scicam_timestamp_offset = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - // Use these to keep track where in the bags we are. After one traversal forward - // in time they need to be reset. + // Use these to keep track where in the bags we are. After one + // traversal forward in time they need to be reset. 
int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0; for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) { @@ -1355,9 +1349,8 @@ void calc_median_residuals(std::vector const& residuals, if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs, save_grayscale, images.back(), nav_cam_pos, found_time)) { - std::cout.precision(17); - std::cout << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << std::endl; - LOG(FATAL) << "Cannot look up nav cam image at given time"; + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n"; } cid_to_image_type.push_back(dense_map::NAV_CAM); @@ -1392,7 +1385,7 @@ void calc_median_residuals(std::vector const& residuals, double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - std::cout << "nav haz nav time bracket " + std::cout << "nav_start haz nav_end times " << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl; std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; @@ -1441,19 +1434,19 @@ void calc_median_residuals(std::vector const& residuals, - haz_cam_start_time; double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - std::cout << "nav sci nav time bracket " + std::cout << "nav_start sci nav_end times " << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - // Read an image at 25% resolution + // Read the sci cam image, and perhaps adjust its size images.push_back(cv::Mat()); cv::Mat local_img; if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs, save_grayscale, local_img, sci_cam_pos, found_time)) - LOG(FATAL) << "Cannot look up sci cam image at given time"; - - cv::resize(local_img, images.back(), 
cv::Size(), 0.25, 0.25, cv::INTER_AREA); + LOG(FATAL) << "Cannot look up sci cam image at given time."; + adjustImageSize(sci_cam_params, local_img); + local_img.copyTo(images.back()); // Sanity check Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize(); @@ -1498,7 +1491,7 @@ void calc_median_residuals(std::vector const& residuals, // Set up the variable blocks to optimize for DepthToHazError depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); // Set up the variable blocks to optimize for NavError nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); @@ -1509,7 +1502,7 @@ void calc_median_residuals(std::vector const& residuals, depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); // Set up the variable blocks to optimize for DepthToSciError depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); @@ -1517,7 +1510,7 @@ void calc_median_residuals(std::vector const& residuals, depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALE_PARAMS); + depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center depth_to_sci_block_sizes.push_back(num_scicam_distortions); // distortion @@ -1765,11 +1758,11 @@ int main(int argc, char** argv) { #if 0 std::cout << 
"hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; #endif + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; // Convert hazcam_to_navcam_trans to Affine3d Eigen::Affine3d hazcam_to_navcam_aff_trans; @@ -2194,7 +2187,8 @@ int main(int argc, char** argv) { ceres::Solve(options, &problem, &summary); // Copy back the optimized intrinsics - sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], sci_cam_focal_vector[0])); + sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], + sci_cam_focal_vector[0])); sci_cam_params.SetOpticalOffset(sci_cam_optical_center); sci_cam_params.SetDistortion(sci_cam_distortion); diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc new file mode 100644 index 00000000..a419be0d --- /dev/null +++ b/dense_map/geometry_mapper/tools/camera_refiner2.cc @@ -0,0 +1,2151 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +// TODO(oalexan1): Consider adding a haz cam to haz cam +// reprojection error in the camera refiner. There will be more +// haz to haz matches than haz to nav or haz to sci. +// TODO(oalexan1): Must document that the cloud timestamp +// that is looked up is what is closest to image timestamp. +// TODO(oalexan1): What if the wrong cloud is looked up +// for given image? Or if the delay is too big? + +// TODO(oalexan1): Must test the DepthError with no bracketing. + +// TODO(oalexan1): Move this to utils +// Get rid of warning beyond our control +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic push +#include +#include +#include +#include +#include +#pragma GCC diagnostic pop + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and " + "full-resolution sci_cam data."); + +DEFINE_string(sparse_map, "", + "A registered SURF sparse map made with some of the ROS bag data, " + "and including nav cam images closely bracketing the sci cam images."); + +DEFINE_string(output_map, "", "Output file containing the updated 
map."); + +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); + +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); + +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); + +DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); + +DEFINE_int32(num_overlaps, 20, "How many images (of all camera types) close and forward in " + "time to match to given image."); + +DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, + "Use depth haz cam clouds that are within this distance in " + "time from the nearest haz cam intensity image."); + +DEFINE_double(robust_threshold, 3.0, + "Pixel errors much larger than this will be exponentially attenuated " + "to affect less the cost function."); + +DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " + "less than this."); + +DEFINE_double(bracket_len, 2.0, + "Lookup sci and haz cam images only between consecutive nav cam images " + "whose distance in time is no more than this (in seconds), after adjusting " + "for the timestamp offset between these cameras. It is assumed the robot " + "moves slowly and uniformly during this time."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); + +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. 
Must be " + "a file with one timestamp per line."); + +DEFINE_bool(float_sparse_map, false, "Optimize the sparse map, hence only the camera params."); + +DEFINE_bool(float_scale, false, + "If to optimize the scale of the clouds (use it if the " + "sparse map is kept fixed), or else rescaling and registration is needed."); + +DEFINE_bool(float_timestamp_offsets, false, + "If to optimize the timestamp offsets among the cameras."); + +DEFINE_double(timestamp_offsets_max_change, 1.0, + "If floating the timestamp offsets, do not let them change by more than this " + "(measured in seconds). Existing image bracketing acts as an additional constraint."); + +DEFINE_string(nav_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for nav_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_string(haz_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for haz_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_string(sci_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, + std::numeric_limits::quiet_NaN(), + "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "file with this value."); + +DEFINE_double(depth_weight, 10.0, + "The weight to give to depth measurements. Use a bigger number as " + "depth errors are usually small fractions of a meter."); + +DEFINE_string(mesh, "", + "Refine the sci cam so that the sci cam texture agrees with the nav cam " + "texture when projected on this mesh."); + +DEFINE_double(mesh_weight, 25.0, + "A larger value will give more weight to the mesh constraint. 
" + "Use a bigger number as depth errors are usually small fractions of a meter."); + +DEFINE_bool(verbose, false, + "Print the residuals and save the images and match files." + "Stereo Pipeline's viewer can be used for visualizing these."); + +namespace dense_map { + + // Calculate interpolated world to camera trans + Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, + const double* end_world_to_ref_t, + const double* ref_to_cam_trans, + double beg_ref_stamp, + double end_ref_stamp, + double ref_to_cam_offset, + double cam_stamp) { + Eigen::Affine3d beg_world_to_ref_aff; + array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); + + Eigen::Affine3d end_world_to_ref_aff; + array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); + + Eigen::Affine3d ref_to_cam_aff; + array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); + + // std::cout.precision(18); + // std::cout << "--beg stamp " << beg_ref_stamp << std::endl; + // std::cout << "--end stamp " << end_ref_stamp << std::endl; + // std::cout << "cam stamp " << cam_stamp << std::endl; + // std::cout.precision(18); + // std::cout << "ref to cam off " << ref_to_cam_offset << std::endl; + // std::cout << "--ref to cam trans\n" << ref_to_cam_aff.matrix() << std::endl; + + // Covert from cam time to ref time and normalize. It is very + // important that below we subtract the big numbers from each + // other first, which are the timestamps, then subtract whatever + // else is necessary. Otherwise we get problems with numerical + // precision with CERES. 
+ double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) + / (end_ref_stamp - beg_ref_stamp); + + if (beg_ref_stamp == end_ref_stamp) + alpha = 0.0; // handle division by zero + + // std::cout << "--alpha " << alpha << std::endl; + if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; + + // Interpolate at desired time + Eigen::Affine3d interp_world_to_ref_aff + = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); + + Eigen::Affine3d interp_world_to_cam_afff = ref_to_cam_aff * interp_world_to_ref_aff; + + // std::cout << "final trans\n" << interp_world_to_cam_afff.matrix() << std::endl; + + return interp_world_to_cam_afff; + } + + // TODO(oalexan1): Store separately matches which end up being + // squashed in a pid_cid_to_fid. + +ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { + // Convert to lower-case + std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower); + + ceres::LossFunction* loss_function = NULL; + if (cost_fun == "l2") + loss_function = NULL; + else if (cost_fun == "huber") + loss_function = new ceres::HuberLoss(th); + else if (cost_fun == "cauchy") + loss_function = new ceres::CauchyLoss(th); + else if (cost_fun == "l1") + loss_function = new ceres::SoftLOneLoss(th); + else + LOG(FATAL) << "Unknown cost function: " + cost_fun; + + return loss_function; +} + +// An error function minimizing the error of projecting +// an xyz point into a camera that is bracketed by +// two reference cameras. The precise timestamp offset +// between them is also floated. 
+struct BracketedCamError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + BracketedCamError(Eigen::Vector2d const& meas_dist_pix, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || + m_block_sizes[7] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; + } + + // Set the correct distortion size. This cannot be done in the interface for now. + // TODO(oalexan1): Make individual block sizes for each camera, then this issue will go away. + m_block_sizes[7] = m_cam_params.GetDistortion().size(); + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+ bool operator()(double const* const* parameters, double* residuals) const { + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[4][0], // ref_to_cam_offset + m_cam_stamp); + + // World point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); + // std::cout << "--bracketX is " << X.transpose() << std::endl; + + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); + Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); + Eigen::VectorXd distortion(m_block_sizes[7]); + for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); + + // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; + // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; + // std::cout << "-dist " << distortion.transpose() << std::endl; + + // Convert world point to given cam coordinates + X = world_to_cam_trans * X; + + // std::cout << "--trans X " << X.transpose() << std::endl; + + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); + + // Compute the residuals + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; + + // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes, cam_params)); + + cost_function->SetNumResiduals(NUM_RESIDUALS); + + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 + cost_function->AddParameterBlock(block_sizes[i]); + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + + return cost_function; + } + + private: + Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp + std::vector m_block_sizes; + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class BracketedCamError + +// An error function minimizing the error of projecting an xyz point +// into a reference camera. Bracketing, timestamps, and transform to +// ref cam are not needed. 
+struct RefCamError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + RefCamError(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 5 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_XYZ_PARAMS || + m_block_sizes[2] != m_num_focal_lengths || + m_block_sizes[3] != NUM_OPT_CTR_PARAMS || + m_block_sizes[4] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; + } + + // Set the correct distortion size. This cannot be done in the interface for now. + + // TODO(oalexan1): Make individual block sizes for each camera, + // then this issue will go away. + m_block_sizes[4] = m_cam_params.GetDistortion().size(); + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + Eigen::Affine3d world_to_ref_t; + array_to_rigid_transform(world_to_ref_t, parameters[0]); + + // World point + Eigen::Vector3d X; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; + // std::cout << "--refX is " << X.transpose() << std::endl; + + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); + Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); + Eigen::VectorXd distortion(m_block_sizes[4]); + for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); + + // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; + // std::cout << "--opt ctr " << optical_center.transpose() << 
std::endl; + // std::cout << "-dist " << distortion.transpose() << std::endl; + + // Convert world point to given cam coordinates + X = world_to_ref_t * X; + + // std::cout << "--trans X " << X.transpose() << std::endl; + + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); + + // Compute the residuals + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; + + // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefCamError(meas_dist_pix, block_sizes, cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_RESIDUALS); + + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 + cost_function->AddParameterBlock(block_sizes[i]); + + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); + + return cost_function; + } + + private: + Eigen::Vector2d m_meas_dist_pix; + std::vector m_block_sizes; + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class RefCamError + +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point. The +// depth point needs to be transformed to world coordinates first. 
For +// that one has to do pose interpolation. +struct BracketedDepthError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 7 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_XYZ_PARAMS || + m_block_sizes[6] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[6][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + array_to_rigid_transform(depth_to_image, parameters[3]); + + // Apply the scale + double depth_to_image_scale = parameters[4][0]; + depth_to_image.linear() *= depth_to_image_scale; + // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; + + // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; + + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; + + // std::cout << "--image meas pt " << M.transpose() 
<< std::endl; + + // Convert to world coordinates + M = world_to_cam_trans.inverse() * M; + // std::cout << "--depth in world coords " << M.transpose() << std::endl; + + // Triangulated world point + Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); + // std::cout << "--triangulated X is " << X.transpose() << std::endl; + + // std::cout << "--weight " << m_weight << std::endl; + + // Compute the residuals + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + // std::cout << "--residual " << residuals[it] << std::endl; + } + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + for (size_t i = 0; i < block_sizes.size(); i++) + cost_function->AddParameterBlock(block_sizes[i]); + + return cost_function; + } + + private: + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp + std::vector m_block_sizes; +}; // End class BracketedDepthError + +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point for +// the ref camera. The depth point needs to be transformed to world +// coordinates first. 
+struct RefDepthError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 4 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_SCALAR_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS) { + LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Current world to camera transform + Eigen::Affine3d world_to_cam; + array_to_rigid_transform(world_to_cam, parameters[0]); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + array_to_rigid_transform(depth_to_image, parameters[1]); + + // Apply the scale + double depth_to_image_scale = parameters[2][0]; + depth_to_image.linear() *= depth_to_image_scale; + // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; + + // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; + + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; + + // std::cout << "--image meas pt " << M.transpose() << std::endl; + + // Convert to world coordinates + M = world_to_cam.inverse() * M; + // std::cout << "--depth in world coords " << M.transpose() << std::endl; + + // Triangulated world point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); + // std::cout << "--triangulated X is " << X.transpose() << std::endl; + + // std::cout << "--weight " << m_weight << std::endl; + + // Compute the residuals + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + // std::cout << 
"--residual " << residuals[it] << std::endl; + } + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new RefDepthError(weight, meas_depth_xyz, block_sizes)); + + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + for (size_t i = 0; i < block_sizes.size(); i++) + cost_function->AddParameterBlock(block_sizes[i]); + + return cost_function; + } + + private: + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + std::vector m_block_sizes; +}; // End class RefDepthError + +// An error function minimizing a weight times the distance from a +// variable xyz point to a fixed reference xyz point. +struct XYZError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + XYZError(Eigen::Vector3d const& ref_xyz, std::vector const& block_sizes, double weight) + : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) { + // Sanity check + if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS) + LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n"; + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. + // TODO(oalexan1): May want to use the analytical Ceres cost function + bool operator()(double const* const* parameters, double* residuals) const { + // Compute the residuals + for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. 
+ static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz, + std::vector const& block_sizes, + double weight) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + + // The residual size is always the same + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector3d m_ref_xyz; // reference xyz + std::vector m_block_sizes; + double m_weight; +}; // End class XYZError + +enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; + +// Calculate the rmse residual for each residual type. +void calc_median_residuals(std::vector const& residuals, + std::vector const& residual_names, + std::string const& tag) { + size_t num = residuals.size(); + + if (num != residual_names.size()) + LOG(FATAL) << "There must be as many residuals as residual names."; + + std::map > stats; + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]] = std::vector(); // initialize + + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]].push_back(std::abs(residuals[it])); + + std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + for (auto it = stats.begin(); it != stats.end(); it++) { + std::string const& name = it->first; + std::vector vals = stats[name]; // make a copy + std::sort(vals.begin(), vals.end()); + + int len = vals.size(); + + int it1 = static_cast(0.25 * len); + int it2 = static_cast(0.50 * len); + int it3 = static_cast(0.75 * len); + + if (len == 0) + std::cout << name << ": " << "none"; + else + std::cout << std::setprecision(8) + << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3]; + std::cout << " (" << len << " residuals)" << std::endl; + } +} + + // TODO(oalexan1): This needs to be 
handled better. + void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int raw_image_cols = image.cols; + int raw_image_rows = image.rows; + int calib_image_cols = cam_params.GetDistortedSize()[0]; + int calib_image_rows = cam_params.GetDistortedSize()[1]; + + int factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. + cv::Mat local_image; + cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); + local_image.copyTo(image); + } + + // Check + if (image.cols != calib_image_cols || image.rows != calib_image_rows) + LOG(FATAL) << "Sci cam images have the wrong size."; + } + + typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + + // A class to encompass all known information about a camera + // This is work in progress and will replace some of the logic further down. + struct cameraImage { + // An index to look up the type of camera. This will equal the + // value ref_camera_type if and only if this is a reference + // camera. + int camera_type; + + // The timestamp for this camera (in floating point seconds since epoch) + double timestamp; + + // The timestamp with an adjustment added to it to be in + // reference camera time + double ref_timestamp; + + // Indices to look up the reference cameras bracketing this camera + // in time. The two indices will have same value if and only if + // this is a reference camera. 
+ int beg_ref_index; + int end_ref_index; + + // The image for this camera, in grayscale + cv::Mat image; + + // The corresponding depth cloud, for an image + depth camera + cv::Mat depth_cloud; + }; + + // Sort by timestamps in the ref camera clock + bool timestampLess(cameraImage i, cameraImage j) { + return (i.ref_timestamp < j.ref_timestamp); + } + + // Find the haz cam depth measurement. Use nearest neighbor interpolation + // to look into the depth cloud. + bool depthValue( // Inputs + cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, + // Output + Eigen::Vector3d& depth_xyz) { + depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize + + if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud + + int col = round(dist_ip[0]); + int row = round(dist_ip[1]); + + if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) + LOG(FATAL) << "Out of range in depth cloud."; + + // After rounding one may hit the bound + if (col == depth_cloud.cols || row == depth_cloud.rows) + return false; + + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); + + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) + return false; + + depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + + return true; + } + + // Rebuild the map. + // TODO(oalexan1): This must be integrated in astrobee. 
+ void RebuildMap(std::string const& map_file, // Will be used for temporary saving of aux data + camera::CameraParameters const& cam_params, + boost::shared_ptr sparse_map) { + std::string rebuild_detector = "SURF"; + std::cout << "Rebuilding map with " << rebuild_detector << " detector."; + + // Copy some data to make sure it does not get lost on resetting things below + std::vector world_to_ref_t = sparse_map->cid_to_cam_t_global_; + std::vector> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; + + // Ensure the new camera parameters are set + sparse_map->SetCameraParameters(cam_params); + + std::cout << "Detecting features."; + sparse_map->DetectFeatures(); + + std::cout << "Matching features."; + // Borrow from the original map which images should be matched with which. + sparse_map->cid_to_cid_.clear(); + for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { + std::map const& track = pid_to_cid_fid[p]; // alias + for (std::map::const_iterator it1 = track.begin(); + it1 != track.end() ; it1++) { + for (std::map::const_iterator it2 = it1; + it2 != track.end() ; it2++) { + if (it1->first != it2->first) { + // Never match an image with itself + sparse_map->cid_to_cid_[it1->first].insert(it2->first); + } + } + } + } + + sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), + sparse_mapping::MatchesFile(map_file), sparse_map.get()); + for (size_t i = 0; i < world_to_ref_t.size(); i++) + sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); + + // Wipe file that is no longer needed + try { + std::remove(sparse_mapping::EssentialFile(map_file).c_str()); + }catch(...) {} + + std::cout << "Building tracks."; + bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff + sparse_mapping::BuildTracks(rm_invalid_xyz, + sparse_mapping::MatchesFile(map_file), + sparse_map.get()); + + // It is essential that during re-building we do not vary the + // cameras. 
Those were usually computed with a lot of SURF features, + // while rebuilding is usually done with many fewer ORGBRISK + // features. + bool fix_cameras = true; + if (fix_cameras) + std::cout << "Performing bundle adjustment with fixed cameras."; + else + std::cout << "Performing bundle adjustment while floating cameras."; + + sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); + } + + // TODO(oalexan1): Move this to utils. + // Intersect ray with mesh. Return true on success. + bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection) { + // Initialize the output + intersection = Eigen::Vector3d(0.0, 0.0, 0.0); + + // Ray from camera going through the pixel + Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], + undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); + cam_ray.normalize(); + + Eigen::Affine3d cam_to_world = world_to_cam.inverse(); + Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; + Eigen::Vector3d cam_ctr = cam_to_world.translation(); + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + + bvh_ray.tmin = min_ray_dist; + bvh_ray.tmax = max_ray_dist; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (bvh_tree->intersect(bvh_ray, &hit)) { + double cam_to_mesh_dist = hit.t; + intersection = cam_ctr + cam_to_mesh_dist * world_ray; + return true; + } + + return false; + } + +} // namespace dense_map + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + + std::cout.precision(17); // to be able to print timestamps + + if 
(FLAGS_ros_bag.empty()) + LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_sparse_map.empty()) + LOG(FATAL) << "The input sparse map was not specified."; + + if (FLAGS_output_map.empty()) + LOG(FATAL) << "The output sparse map was not specified."; + + if (FLAGS_robust_threshold <= 0.0) + LOG(FATAL) << "The robust threshold must be positive.\n"; + + if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; + + if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; + + if (FLAGS_timestamp_offsets_max_change < 0) + LOG(FATAL) << "The timestamp offsets must be non-negative."; + + // Read the config file + double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; + Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::Affine3d hazcam_depth_to_image_transform; + hazcam_depth_to_image_transform.setIdentity(); // default value + camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), + Eigen::Vector2d(0, 0)); + dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT + "scicam_to_hazcam_timestamp_offset", // NOLINT + "hazcam_to_navcam_transform", // NOLINT + "scicam_to_hazcam_transform", // NOLINT + "nav_cam_transform", // NOLINT + "hazcam_depth_to_image_transform", // NOLINT + navcam_to_hazcam_timestamp_offset, + scicam_to_hazcam_timestamp_offset, + hazcam_to_navcam_trans, scicam_to_hazcam_trans, + navcam_to_body_trans, hazcam_depth_to_image_transform, + 
nav_cam_params, haz_cam_params, + sci_cam_params); + + if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { + double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; + std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset + << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; + scicam_to_hazcam_timestamp_offset = new_val; + } + + if (FLAGS_mesh_weight <= 0.0) + LOG(FATAL) << "The mesh weight must be positive.\n"; + + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; + std::shared_ptr bvh_tree; + if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + + // If desired to process only specific timestamps + std::set sci_cam_timestamps_to_use; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps_to_use.insert(val); + } + +#if 0 + std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; + std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; + std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() + << "\n"; +#endif + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + + double hazcam_depth_to_image_scale + = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + + // Since we will keep the scale fixed, vary the part of the transform without + // the scale, while adding the scale each time before the transform is applied + Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; + hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; + + // Convert hazcam_to_navcam_trans to Affine3d + Eigen::Affine3d hazcam_to_navcam_aff_trans; + 
hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; + + // Convert scicam_to_hazcam_trans to Affine3d + Eigen::Affine3d scicam_to_hazcam_aff_trans; + scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; + + // Read the sparse map + boost::shared_ptr sparse_map = + boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + + // TODO(oalexan1): All this timestamp reading logic below should be in a function + + // Parse the ref timestamps from the sparse map + // We assume the sparse map image names are the timestamps. + std::vector ref_timestamps; + const std::vector& sparse_map_images = sparse_map->cid_to_filename_; + ref_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + ref_timestamps[cid] = timestamp; + } + if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; + + // Will optimize the nav cam poses as part of the process + std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias + + // Put transforms of the reference cameras in a vector. We will optimize them. + int num_ref_cams = world_to_ref_t.size(); + if (world_to_ref_t.size() != ref_timestamps.size()) + LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; + std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::rigid_transform_to_array(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // We assume our camera rig has n camera types. Each can be image or + // depth + image. Just one camera must be the reference camera. In + // this code it will be nav_cam. Every camera object (class + // cameraImage) knows its type (an index), which can be used to look + // up its intrinsics, image topic, depth topic (if present), + // ref_to_cam_timestamp_offset, and ref_to_cam_transform. 
A camera + // object also stores its image, depth cloud (if present), its + // timestamp, and indices pointing to its left and right ref + // bracketing cameras. + + // For every instance of a reference camera its + // ref_to_cam_timestamp_offset is 0 and kept fixed, + // ref_to_cam_transform is the identity and kept fixed, and the + // indices pointing to the left and right ref bracketing cameras are + // identical. + + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + + // Image and depth topics + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; + + std::vector> cam_timestamps_to_use = {std::set(), + std::set(), + sci_cam_timestamps_to_use}; + + // The timestamp offsets from ref cam to given cam + std::vector ref_to_cam_timestamp_offsets = + {0.0, + navcam_to_hazcam_timestamp_offset, + navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset}; + + for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { + std::cout << "Ref to cam offset for " << cam_names[it] << ' ' + << ref_to_cam_timestamp_offsets[it] << std::endl; + } + + std::vector cam_params = {nav_cam_params, + haz_cam_params, + sci_cam_params}; + + // Which intrinsics from which cameras to float + std::vector> intrinsics_to_float(num_cam_types); + dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); + dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); + + // The transform from ref to given cam + std::vector ref_to_cam_trans; + ref_to_cam_trans.push_back(Eigen::Affine3d::Identity()); 
+ ref_to_cam_trans.push_back(hazcam_to_navcam_aff_trans.inverse()); + ref_to_cam_trans.push_back(scicam_to_hazcam_aff_trans.inverse() + * hazcam_to_navcam_aff_trans.inverse()); + + // Put in arrays, so we can optimize them + std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + for (size_t it = 0; it < ref_to_cam_trans.size(); it++) { + std::cout << "Ref to cam transform for " << cam_names[it] << ":\n" + << ref_to_cam_trans[it].matrix() << std::endl; + } + + // Depth to image transforms and scales + std::vector depth_to_image_noscale; + std::vector depth_to_image_scales = {1.0, hazcam_depth_to_image_scale, 1.0}; + depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); + depth_to_image_noscale.push_back(hazcam_depth_to_image_noscale); + depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); + // Put in arrays, so we can optimize them + std::vector depth_to_image_noscale_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Put the intrinsics in arrays + std::vector focal_lengths(num_cam_types); + std::vector optical_centers(num_cam_types); + std::vector distortions(num_cam_types); + for (int it = 0; it < num_cam_types; it++) { + focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths + optical_centers[it] = cam_params[it].GetOpticalOffset(); + distortions[it] = cam_params[it].GetDistortion(); + } + + // Build a map for quick access for all the messages we may need + // TODO(oalexan1): Must the view be kept open for this to work? 
+ std::vector topics; + for (auto it = 0; it < image_topics.size(); it++) + if (image_topics[it] != "") topics.push_back(image_topics[it]); + for (auto it = 0; it < depth_topics.size(); it++) + if (depth_topics[it] != "") topics.push_back(depth_topics[it]); + rosbag::Bag bag; + bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery(topics)); + std::map> bag_map; + dense_map::indexMessages(view, bag_map); + + // A lot of care is needed here. This remembers how we travel in time + // for each camera type so we have fewer messages to search. + // But if a mistake is done below it will mess up this bookkeeping. + std::vector image_start_positions(num_cam_types, 0); + std::vector cloud_start_positions(num_cam_types, 0); + + // Cannot add a (positive) value more this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector upper_bound(num_cam_types, 1.0e+100); + + // Cannot add a (negative) value less than this to + // ref_to_cam_timestamp_offsets[cam_type] before getting out of the + // bracket. + std::vector lower_bound(num_cam_types, -1.0e+100); + + std::cout << "Bracketing the images in time." << std::endl; + + // Populate the data for each camera image + std::vector cams; + for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { + std::cout.precision(18); + + if (ref_cam_type != 0) + LOG(FATAL) << "It is assumed that the ref cam type is 0."; + + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + dense_map::cameraImage cam; + bool success = false; + if (cam_type == ref_cam_type) { + cam.camera_type = cam_type; + cam.timestamp = ref_timestamps[ref_it]; + cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it; + + // Search the whole set of timestamps, so set start_pos = + // 0. This is slower but more robust than keeping track of how + // we move in the increasing order of time. 
+ int start_pos = 0; + bool save_grayscale = true; // for matching we will need grayscale + double found_time = -1.0; + // this has to succeed since we picked the ref images in the map + if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, + // outputs + cam.image, image_start_positions[cam_type], // care here + found_time)) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // The exact time is expected + if (found_time != cam.timestamp) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) + continue; + + success = true; + + } else { + if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket + + // Convert the bracketing timestamps to current cam's time + double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_timestamp_offsets[cam_type]; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_timestamp_offsets[cam_type]; + + if (right_timestamp <= left_timestamp) + LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; + + if (right_timestamp - left_timestamp > FLAGS_bracket_len) + continue; // Must respect the bracket length + + // Find the image timestamp closest to the midpoint of the brackets. This will give + // more room to vary the timestamp later. + double mid_timestamp = (left_timestamp + right_timestamp)/2.0; + + // Search forward in time from image_start_positions[cam_type]. + // We will update that too later. One has to be very careful + // with it so it does not go too far forward in time + // so that at the next iteration we are passed what we + // search for. 
+ int start_pos = image_start_positions[cam_type]; // care here + bool save_grayscale = true; // for matching we will need grayscale + double curr_timestamp = left_timestamp; // start here + cv::Mat best_image; + double best_dist = 1.0e+100; + double best_time = -1.0, found_time = -1.0; + while (1) { + if (found_time >= right_timestamp) break; // out of range + + cv::Mat image; + if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], save_grayscale, + // outputs + image, + start_pos, // care here + found_time)) + break; // Need not succeed, but then there's no need to go on are we are at the end + + std::cout.precision(18); + + double curr_dist = std::abs(found_time - mid_timestamp); + if (curr_dist < best_dist) { + best_dist = curr_dist; + best_time = found_time; + // Update the start position for the future only if this is a good + // solution. Otherwise we may have moved too far. + image_start_positions[cam_type] = start_pos; + image.copyTo(best_image); + } + + // Go forward in time. We count on the fact that lookupImage() looks forward from given guess. + curr_timestamp = std::nextafter(found_time, 1.01 * found_time); + } + + if (best_time < 0.0) continue; // bracketing failed + + // Note how we allow best_time == left_timestamp if there's no other choice + if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck + + cam.camera_type = cam_type; + cam.timestamp = best_time; + cam.ref_timestamp = best_time - ref_to_cam_timestamp_offsets[cam_type]; + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it + 1; + cam.image = best_image; + + upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); + lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); + + // TODO(oalexan1): Wipe this temporary block + if (cam_type == 1) { + // Must compare raw big timestamps before and after! + // Must compare residuals! 
+ + double nav_start = ref_timestamps[ref_it]; + double haz_time = cam.ref_timestamp; + double nav_end = ref_timestamps[ref_it + 1]; + // std::cout << "--xxxhaz after " << haz_time - nav_start << ' ' << nav_end - haz_time << + // ' ' << nav_end - nav_start << std::endl; + } + if (cam_type == 2) { + double nav_start = ref_timestamps[ref_it]; + double sci_time = cam.ref_timestamp; + double nav_end = ref_timestamps[ref_it + 1]; + // std::cout << "--xxxsci after " << sci_time - nav_start << ' ' << nav_end - sci_time << + // ' ' << nav_end - nav_start << std::endl; + } + + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) + continue; + + success = true; + } + + if (!success) continue; + + if (depth_topics[cam_type] != "") { + double found_time = -1.0; + cv::Mat cloud; + // Look up the closest cloud in time (either before or after cam.timestamp) + // This need not succeed. + dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, + // Outputs + cam.depth_cloud, + cloud_start_positions[cam_type], // care here + found_time); + } + + cams.push_back(cam); + } // end loop over camera types + } // end loop over ref images + + std::cout << "Allowed timestamp offset range while respecting the bracket for given cameras:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + + // So far we had the relative change. Now add the actual offset to get the max allowed offset. 
+ lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; + } + + if (FLAGS_float_timestamp_offsets) { + std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + + lower_bound[cam_type] = std::max(lower_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + - FLAGS_timestamp_offsets_max_change); + + upper_bound[cam_type] = std::min(upper_bound[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + + FLAGS_timestamp_offsets_max_change); + + // Tighten a bit to ensure we don't exceed things when we add + // and subtract timestamps later. Note that timestamps are + // measured in seconds and fractions of a second since epoch and + // can be quite large so precision loss can easily happen. + lower_bound[cam_type] += 1.0e-5; + upper_bound[cam_type] -= 1.0e-5; + + std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] + << ", " << upper_bound[cam_type] << "]\n"; + } + } + + std::cout << "--Deal with adjustment!" 
<< std::endl; + for (size_t it = 0; it < cams.size(); it++) { + if (cams[it].camera_type == 2) { + dense_map::adjustImageSize(cam_params[2], cams[it].image); + } + } + + std::sort(cams.begin(), cams.end(), dense_map::timestampLess); + + if (FLAGS_verbose) { + int count = 10000; + std::vector image_files; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cams[it].camera_type << ".jpg"; + std::string name = oss.str(); + std::cout << "--writing " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } + } + + std::cout << "Detecting features." << std::endl; + + // Detect features using multiple threads + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(cams.size()); + cid_to_keypoint_map.resize(cams.size()); + ff_common::ThreadPool thread_pool1; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool1.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose, + &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + } + thread_pool1.Join(); + + dense_map::MATCH_MAP matches; + + std::vector > image_pairs; + for (size_t it1 = 0; it1 < cams.size(); it1++) { + for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) { + image_pairs.push_back(std::make_pair(it1, it2)); + } + } + + std::cout << "Matching features." 
<< std::endl; + ff_common::ThreadPool thread_pool2; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + bool verbose = true; + thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, + left_image_it, right_image_it, + cid_to_descriptor_map[left_image_it], + cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], + cid_to_keypoint_map[right_image_it], + verbose, &matches[pair]); + } + thread_pool2.Join(); + + // If feature A in image I matches feather B in image J, which matches feature C in image K, + // then (A, B, C) belong together into a track. Build such a track. + + std::vector, int>> keypoint_map(cams.size()); + + int num_total_matches = 0; // temporary + + // Give all interest points in a given image a unique id + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + keypoint_map[left_index][dist_left_ip] = 0; + keypoint_map[right_index][dist_right_ip] = 0; + num_total_matches++; + } + } + + std::cout << "--bbb num total matches " << num_total_matches << std::endl; + std::cout << "--why so many more matches than pid?" << std::endl; + std::cout << "--test adding missing pairs!" << std::endl; + std::cout << "--must do two passes!" 
<< std::endl; + + // Give all interest points in a given image a unique id + // And put them in a vector with the id corresponding to the interest point + std::vector>> keypoint_vec(cams.size()); + for (size_t cam_it = 0; cam_it < cams.size(); cam_it++) { + int count = 0; + for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { + ip_it->second = count; + count++; + // std::cout << "--value " << (ip_it->first).first << ' ' << (ip_it->first).second << ' ' + // << ip_it->second << std::endl; + keypoint_vec[cam_it].push_back(ip_it->first); + // std::cout << "--size is " << keypoint_vec[cam_it].size() << std::endl; + } + } + + std::cout << "--write my own function! It should just remove conflicts!" << std::endl; + + openMVG::matching::PairWiseMatches match_map; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + + std::vector mvg_matches; + + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + + // std::cout << "zzz1 " << left_index << ' ' << dist_left_ip.first << ' ' << + // dist_left_ip.second << ' ' << right_index << ' ' << dist_right_ip.first << ' ' << + // dist_right_ip.second << std::endl; + + int left_id = keypoint_map[left_index][dist_left_ip]; + int right_id = keypoint_map[right_index][dist_right_ip]; + mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); + } + match_map[index_pair] = mvg_matches; + } + + if (FLAGS_verbose) { + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = 
it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; + + int left_index = index_pair.first; + int right_index = index_pair.second; + + std::cout << "--indices " << left_index << ' ' << right_index << std::endl; + + std::ostringstream oss1; + oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; + + std::string left_stem = oss1.str(); + std::string left_image = left_stem + ".jpg"; + + std::ostringstream oss2; + oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; + + std::string right_stem = oss2.str(); + std::string right_image = right_stem + ".jpg"; + + std::string match_file = left_stem + "__" + right_stem + ".match"; + + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); + } + } + + std::cout << "Find other things which need deallocation!" << std::endl; + std::cout << "De-allocate images and point clouds when no longer needed!" << std::endl; + + // not needed anymore and take up a lot of RAM + matches.clear(); matches = dense_map::MATCH_MAP(); + keypoint_map.clear(); keypoint_map.shrink_to_fit(); + cid_to_descriptor_map.clear(); cid_to_descriptor_map.shrink_to_fit(); + cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); + + std::vector > pid_to_cid_fid; + { + // Build tracks + // De-allocate these as soon as not needed to save memory + openMVG::tracks::TracksBuilder trackBuilder; + trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences + trackBuilder.Filter(); // Filter: Remove tracks that have conflict + // trackBuilder.ExportToStream(std::cout); + // Export tracks as a map (each entry is a sequence of imageId and featureIndex): + // {TrackIndex => {(imageIndex, featureIndex), ... 
,(imageIndex, featureIndex)} + openMVG::tracks::STLMAPTracks map_tracks; + trackBuilder.ExportToSTL(map_tracks); + + // TODO(oalexan1): Print how many pairwise matches were there before + // and after filtering tracks. + + if (map_tracks.empty()) + LOG(FATAL) << "No tracks left after filtering. Perhaps images are too dis-similar?\n"; + + size_t num_elems = map_tracks.size(); + // Populate the filtered tracks + pid_to_cid_fid.clear(); + pid_to_cid_fid.resize(num_elems); + size_t curr_id = 0; + for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { + for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { + pid_to_cid_fid[curr_id][itr2->first] = itr2->second; + } + curr_id++; + } + } + + // The transform from every camera to the world + std::vector world_to_cam(cams.size()); + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + // std::cout << "--ref indices " << beg_index << ' ' << end_index << std::endl; + // std::cout << "--cam type " << cam_type << std::endl; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); + + // std::cout << "--trans for camera: " << it << ' ' << world_to_cam[it].matrix() << std::endl; + } + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // std::cout << std::endl; + // std::cout << "pid is " << pid << std::endl; + // std::cout << "---aaa pid size is " << pid_to_cid_fid[pid].size() << std::endl; + // std::cout << "zzz2 "; + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = 
cid_fid->second; + // std::cout << cid << ' ' << keypoint_vec[cid][fid].first << ' '; + // std::cout << keypoint_vec[cid][fid].second << " "; + } + // std::cout << std::endl; + } + + // Do multiview triangulation + std::vector xyz_vec(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num = 0; + + std::vector focal_length_vec; + std::vector world_to_cam_vec; + std::vector pix_vec; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } + + // Triangulate n rays emanating from given undistorted and centered pixels + Eigen::Vector3d joint_xyz = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); + + xyz_vec[pid] = joint_xyz; + + // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; + } + + std::cout << "--must do two passes!" << std::endl; + std::cout << "--must filter by min triangulation angle and points behind camera" << std::endl; + + // std::vector> cid_fid_to_pid; + // sparse_mapping::InitializeCidFidToPid(cams.size(), pid_to_cid_fid, &cid_fid_to_pid); + + std::cout << "---too many outliers!" << std::endl; + + std::cout << "--start selecting!" 
<< std::endl; + + // Set up the variable blocks to optimize for BracketedCamError + std::vector bracketed_cam_block_sizes; + int num_focal_lengths = 1; + int num_distortion_params = 1; // will be overwritten + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_cam_block_sizes.push_back(num_focal_lengths); + bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + std::cout << "--make bracketed block sizes individual!" << std::endl; + bracketed_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + + // Set up the variable blocks to optimize for RefCamError + std::vector ref_cam_block_sizes; + ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + ref_cam_block_sizes.push_back(num_focal_lengths); + ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + std::cout << "--make ref block sizes individual!" 
<< std::endl; + ref_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later + + // Set up the variable blocks to optimize for BracketedDepthError + std::vector bracketed_depth_block_sizes; + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for RefDepthError + std::vector ref_depth_block_sizes; + ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + // Set up the variable blocks to optimize for the mesh xyz + std::vector mesh_block_sizes; + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + + std::cout << "must test with the ref cam having depth!" << std::endl; + + // Form the problem + ceres::Problem problem; + std::vector residual_names; + std::vector residual_scales; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cam_type == ref_cam_type) { + // The cost function of projecting in the ref cam. 
+ ceres::CostFunction* ref_cost_function = + dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* ref_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // std::cout << "--add block" << std::endl; + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (ref_cost_function, ref_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &xyz_vec[pid][0], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; + + // The constraint that the depth points agree with triangulated points + ceres::CostFunction* ref_depth_cost_function + = dense_map::RefDepthError::Create(FLAGS_depth_weight, + depth_xyz, + ref_depth_block_sizes); + + ceres::LossFunction* ref_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + problem.AddResidualBlock + (ref_depth_cost_function, + ref_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + 
&depth_to_image_scales[cam_type], + &xyz_vec[pid][0]); + + if (!FLAGS_float_sparse_map) + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + if (!FLAGS_float_scale) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + + } else { + // Other cameras, which need bracketing + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // std::cout << "--add block" << std::endl; + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], + &optical_centers[cam_type][0], + &distortions[cam_type][0]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + + Eigen::Vector3d depth_xyz(0, 0, 0); + if 
(!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) + continue; // could not look up the depth value + + // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; + + // Ensure that the depth points agree with triangulated points + ceres::CostFunction* bracketed_depth_cost_function + = dense_map::BracketedDepthError::Create(FLAGS_depth_weight, + depth_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_block_sizes); + + ceres::LossFunction* bracketed_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); + residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + residual_scales.push_back(FLAGS_depth_weight); + problem.AddResidualBlock + (bracketed_depth_cost_function, + bracketed_depth_loss_function, + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type]); + + if (!FLAGS_float_sparse_map) { + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); + problem.SetParameterBlockConstant + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); + } + if (!FLAGS_float_scale) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + if (!FLAGS_float_timestamp_offsets) { + problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + 
lower_bound[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + upper_bound[cam_type]); + } + } + } + } + + if (FLAGS_mesh != "") { + // Add the mesh constraint + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d mesh_xyz(0, 0, 0); + int num = 0; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + bool have_mesh_intersection = false; + // Try to make the intersection point be on the mesh and the nav cam ray + // to make the sci cam to conform to that. + // TODO(oalexan1): Think more of the range of the ray below + double min_ray_dist = 0.0; + double max_ray_dist = 10.0; + Eigen::Vector3d intersection(0.0, 0.0, 0.0); + have_mesh_intersection + = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + min_ray_dist, max_ray_dist, + // Output + intersection); + + if (have_mesh_intersection) { + mesh_xyz += intersection; + num += 1; + } + } + + if (num >= 1) { + mesh_xyz /= num; + + // std::cout << "--num and mesh xyz is " << num << ' ' << mesh_xyz.transpose() << std::endl; + + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_weight); + + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); + + residual_names.push_back("mesh_x_m"); + residual_names.push_back("mesh_y_m"); + residual_names.push_back("mesh_z_m"); + + residual_scales.push_back(FLAGS_mesh_weight); + residual_scales.push_back(FLAGS_mesh_weight); + 
residual_scales.push_back(FLAGS_mesh_weight); + } + } + // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; + } + + // See which intrinsics from which cam to float or keep fixed + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (intrinsics_to_float[cam_type].find("focal_length") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; + } + if (intrinsics_to_float[cam_type].find("optical_center") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << std::endl; + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; + } + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { + std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + } else { + std::cout << "For " << cam_names[cam_type] << " floating distortion." 
<< std::endl; + } + } + + // Evaluate the residual before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "before opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Copy the offsets to specific cameras + std::cout.precision(18); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + std::cout << "--offset before " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // Evaluate the residual after optimization + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must 
be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; + for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale + residuals[it] /= residual_scales[it]; + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Copy back the reference transforms + std::cout.precision(18); + std::cout << "--sparse map before\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::array_to_rigid_transform(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + std::cout << "--sparse map after\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; + + // Copy back the optimized intrinsics + for (int it = 0; it < num_cam_types; it++) { + cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); + cam_params[it].SetOpticalOffset(optical_centers[it]); + cam_params[it].SetDistortion(distortions[it]); + } + + // Copy back the optimized extrinsics + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy back the depth to image transforms without scales + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (depth_to_image_noscale[cam_type], + &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy extrinsics to specific cameras + hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + scicam_to_hazcam_aff_trans = hazcam_to_navcam_aff_trans.inverse() * ref_to_cam_trans[2].inverse(); + + // Copy intrinsics to 
the specific cameras + if (FLAGS_nav_cam_intrinsics_to_float != "") nav_cam_params = cam_params[0]; + if (FLAGS_haz_cam_intrinsics_to_float != "") haz_cam_params = cam_params[1]; + if (FLAGS_sci_cam_intrinsics_to_float != "") sci_cam_params = cam_params[2]; + + // Copy the offsets to specific cameras + std::cout.precision(18); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + std::cout << "--offset after " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; + + navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + scicam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1] + - ref_to_cam_timestamp_offsets[2]; + + std::cout.precision(18); + std::cout << "--navcam_to_hazcam_timestamp_offset " << navcam_to_hazcam_timestamp_offset << std::endl; + std::cout << "--scicam_to_hazcam_timestamp_offset " << scicam_to_hazcam_timestamp_offset << std::endl; + + // Update the optimized depth to image (for haz cam only) + int cam_type = 1; // haz cam + hazcam_depth_to_image_scale = depth_to_image_scales[cam_type]; + hazcam_depth_to_image_transform = depth_to_image_noscale[cam_type]; + hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + + // Update the config file + { + // update nav and haz + bool update_cam1 = true, update_cam2 = true; + std::string cam1_name = "nav_cam", cam2_name = "haz_cam"; + boost::shared_ptr + cam1_params(new camera::CameraParameters(nav_cam_params)); + boost::shared_ptr + cam2_params(new camera::CameraParameters(haz_cam_params)); + bool update_depth_to_image_transform = true; + bool update_extrinsics = true; + bool update_timestamp_offset = true; + std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; + // Modify in-place the robot config file + dense_map::update_config_file(update_cam1, cam1_name, cam1_params, + update_cam2, cam2_name, cam2_params, + update_depth_to_image_transform, + hazcam_depth_to_image_transform, update_extrinsics, + 
hazcam_to_navcam_aff_trans, update_timestamp_offset, + cam1_to_cam2_timestamp_offset_str, + navcam_to_hazcam_timestamp_offset); + } + { + // update sci and haz cams + // TODO(oalexan1): Write a single function to update all 3 of them + bool update_cam1 = true, update_cam2 = true; + std::string cam1_name = "sci_cam", cam2_name = "haz_cam"; + boost::shared_ptr + cam1_params(new camera::CameraParameters(sci_cam_params)); + boost::shared_ptr + cam2_params(new camera::CameraParameters(haz_cam_params)); + bool update_depth_to_image_transform = true; + bool update_extrinsics = true; + bool update_timestamp_offset = true; + std::string cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; + // Modify in-place the robot config file + dense_map::update_config_file(update_cam1, cam1_name, cam1_params, + update_cam2, cam2_name, cam2_params, + update_depth_to_image_transform, + hazcam_depth_to_image_transform, update_extrinsics, + scicam_to_hazcam_aff_trans.inverse(), + update_timestamp_offset, + cam1_to_cam2_timestamp_offset_str, + scicam_to_hazcam_timestamp_offset); + } + + std::cout << "Writing: " << FLAGS_output_map << std::endl; + if (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "") { + std::cout << "Either the sparse map intrinsics or cameras got modified. " + << "The map must be rebuilt." << std::endl; + + dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data + nav_cam_params, sparse_map); + } + + sparse_map->Save(FLAGS_output_map); + + return 0; +} // NOLINT + diff --git a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py index 518fd2c2..54006b5d 100644 --- a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py +++ b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py @@ -18,79 +18,101 @@ # Create camera files that texrecon will understand. 
-import sys, os, re, glob, argparse +import argparse +import glob +import os +import re +import sys + import numpy as np -parser = argparse.ArgumentParser(description='Convert cameras to the format of texrecon.') -parser.add_argument('--camera_dir', default = '', help='The directory containing the camera information (the output of geometry_mapper).') -parser.add_argument('--undistorted_image_dir', default = '', help='The directory containing the undistorted images.') -parser.add_argument('--camera_type', default = '', help='The camera type (nav_cam, haz_cam, or sci_cam).') +parser = argparse.ArgumentParser( + description="Convert cameras to the format of texrecon." +) +parser.add_argument( + "--camera_dir", + default="", + help="The directory containing the camera information (the output of geometry_mapper).", +) +parser.add_argument( + "--undistorted_image_dir", + default="", + help="The directory containing the undistorted images.", +) +parser.add_argument( + "--camera_type", default="", help="The camera type (nav_cam, haz_cam, or sci_cam)." +) args = parser.parse_args() if args.camera_dir == "" or args.undistorted_image_dir == "" or args.camera_type == "": - print("Must specify the camera directory, directory of undistorted images, and camera type.") + print( + "Must specify the camera directory, directory of undistorted images, and camera type." 
+ ) sys.exit(1) -if args.camera_type != "nav_cam" and args.camera_type != "haz_cam" \ - and args.camera_type != "sci_cam": +if ( + args.camera_type != "nav_cam" + and args.camera_type != "haz_cam" + and args.camera_type != "sci_cam" +): print("The camera type must be nav_cam, haz_cam, or sci_cam") sys.exit(1) - + # Read the intrinsics intr_file = args.undistorted_image_dir + "/undistorted_intrinsics.txt" if not os.path.exists(intr_file): print("Missing file: " + intr_file) sys.exit(1) -with open(intr_file, 'r') as f: +with open(intr_file, "r") as f: for line in f: - if re.match('^\s*\#', line): - continue # ignore the comments + if re.match("^\s*\#", line): + continue # ignore the comments vals = line.split() if len(vals) < 5: print("Expecting 5 parameters in " + intr_file) sys.exit(1) widx = float(vals[0]) widy = float(vals[1]) - f = float(vals[2]) - cx = float(vals[3]) - cy = float(vals[4]) + f = float(vals[2]) + cx = float(vals[3]) + cy = float(vals[4]) max_wid = widx - if (widy > max_wid): + if widy > max_wid: max_wid = widy - + # normalize - nf = f / max_wid + nf = f / max_wid ncx = cx / widx ncy = cy / widy - d0 = 0.0 - d1 = 0.0 + d0 = 0.0 + d1 = 0.0 paspect = 1.0 - break # finished reading the line we care for - + break # finished reading the line we care for + # Convert the cameras to texrecon's format suffix = "_" + args.camera_type + "_to_world.txt" # Get the cameras to write based on the list of images in the index # We avoid simply doing an ls in that directory to ensure we don't # run into old files -index_file = os.path.join(args.camera_dir, args.camera_type + '_index.txt') +index_file = os.path.join(args.camera_dir, args.camera_type + "_index.txt") with open(index_file, "r") as f: for image_file in f: image_file = image_file.rstrip() image_file = os.path.basename(image_file) - - m = re.match('^(.*?)\.jpg', image_file) + + m = re.match("^(.*?)\.jpg", image_file) if not m: print("Expecting a .jpg file, but got: " + image_file) in_cam = 
os.path.join(args.camera_dir, m.group(1) + suffix) - + out_cam = args.undistorted_image_dir + "/" + os.path.basename(in_cam) - m = re.match('^(.*?)' + suffix, out_cam) + m = re.match("^(.*?)" + suffix, out_cam) if not m: print("Could not match desired expression.") sys.exit(1) @@ -99,26 +121,34 @@ if not os.path.exists(in_cam): print("Cannot find: " + in_cam) sys.exit(1) - - M = np.loadtxt(in_cam) # camera to world - M = np.linalg.inv(M) # world to camera + + M = np.loadtxt(in_cam) # camera to world + M = np.linalg.inv(M) # world to camera print("Writing: " + out_cam) - with open(out_cam, 'w') as f: + with open(out_cam, "w") as f: # translation - f.write("%0.17g %0.17g %0.17g " % \ - (M[0][3], M[1][3], M[2][3])) + f.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) # rotation - f.write("%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % \ - (M[0][0], M[0][1], M[0][2], - M[1][0], M[1][1], M[1][2], - M[2][0], M[2][1], M[2][2])) + f.write( + "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" + % ( + M[0][0], + M[0][1], + M[0][2], + M[1][0], + M[1][1], + M[1][2], + M[2][0], + M[2][1], + M[2][2], + ) + ) # normaized inrinsics - f.write("%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % \ - (nf, d0, d1, paspect, ncx, ncy)) - - - + f.write( + "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" + % (nf, d0, d1, paspect, ncx, ncy) + ) diff --git a/dense_map/geometry_mapper/tools/colorize_bag_images.cc b/dense_map/geometry_mapper/tools/colorize_bag_images.cc new file mode 100644 index 00000000..98965e77 --- /dev/null +++ b/dense_map/geometry_mapper/tools/colorize_bag_images.cc @@ -0,0 +1,153 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. 
+ * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +// A tool to make the bag images in color, for testing purposes. The +// chosen colors are rather arbitrary. If the images are already in +// color, their colors will be altered as one can't tell if the image +// read in is color or grayscale, since it will have 3 channels either +// way. Compressed images will be decompressed and won't be compressed +// back. 
+ +#include +#include +#include + +#include +#include + +#include + +#include +#include + +// ROS +#include +#include +#include +#include +#include + +#include +#include +#include + +DEFINE_string(input_bag, "", "The input bag."); + +DEFINE_string(output_bag, "", "The output bag."); + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + + if (FLAGS_input_bag == "" || FLAGS_output_bag == "") { + std::cout << "Not all inputs were specified.\n"; + return 1; + } + + int tmp_count = 10000; + + // Open the output bag + std::cout << "Opening the output bag file: " << FLAGS_output_bag << "\n"; + rosbag::Bag output_bag; + output_bag.open(FLAGS_output_bag, rosbag::bagmode::Write); + + // Get the topics from the input bag + std::vector topics; + dense_map::readTopicsInBag(FLAGS_input_bag, topics); + + rosbag::Bag input_bag; + std::cout << "Opening the input bag file: " << FLAGS_input_bag << "\n"; + input_bag.open(FLAGS_input_bag, rosbag::bagmode::Read); + + rosbag::View view(input_bag, rosbag::TopicQuery(topics)); + for (rosbag::MessageInstance const m : view) { + bool have_image = false; + cv::Mat image; + std_msgs::Header header; + + // Try to read a compressed image + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); + if (comp_image_msg) { + // Convert the compressed image data to cv::Mat + try { + image = cv::imdecode(cv::Mat(comp_image_msg->data), cv::IMREAD_COLOR); + } catch (cv_bridge::Exception const& e) { + LOG(ERROR) << "Unable to convert compressed image to bgr8.\n"; + continue; + } + + // Use the header from the input image, including the timestamp + header = comp_image_msg->header; + have_image = true; + } + + if (!have_image) { + // Try to read a regular uncompressed image + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + try { + // Copy from the temporary pointer to a stable location + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); + } catch 
(cv_bridge::Exception const& e) { + LOG(ERROR) << "Unable to convert image to bgr8.\n"; + continue; + } + + // Use the header from the input image, including the timestamp + header = image_msg->header; + have_image = true; + } + } + + if (!have_image) { + // Write the message without any changes + output_bag.write(m.getTopic(), m.getTime(), m); + continue; + } + + // Tweak the pixels to create color if the input is grayscale + for (int row = 0; row < image.rows; row++) { + for (int col = 0; col < image.cols; col++) { + cv::Vec3b color = image.at(row, col); + int delta = 60; + color[0] = std::max(static_cast(color[0]), delta) - delta; + color[2] = std::max(static_cast(color[2]), delta) - delta; + image.at(row, col) = color; + } + } + + // Form the output image + cv_bridge::CvImage cv_image; + cv_image.image = image; + cv_image.encoding = "bgr8"; + sensor_msgs::Image out_image_msg; + cv_image.toImageMsg(out_image_msg); + + // Use the header from the input image, including the timestamp + out_image_msg.header = header; + + // Write it to the bag to the same topic + output_bag.write(m.getTopic() + "_color", m.getTime(), out_image_msg); + } + + input_bag.close(); + output_bag.close(); + + return 0; +} diff --git a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc index 73c028c1..ab4bc5ca 100644 --- a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc +++ b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc @@ -118,10 +118,11 @@ int main(int argc, char** argv) { // Convert to an opencv image cv::Mat image; try { - image = cv_bridge::toCvShare(image_msg, "bgr8")->image; + // Do a copy as the message is temporary + (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); } catch (cv_bridge::Exception const& e) { try { - image = cv_bridge::toCvShare(image_msg, "32FC1")->image; + (cv_bridge::toCvShare(image_msg, "32FC1")->image).copyTo(image); } catch (cv_bridge::Exception const& e) { LOG(ERROR) << 
"Unable to convert " << image_msg->encoding.c_str() << " image to bgr8 or 32FC1"; continue; @@ -133,13 +134,15 @@ int main(int argc, char** argv) { } // Extract a compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg = m.instantiate(); + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); if (comp_image_msg) { ros::Time stamp = comp_image_msg->header.stamp; curr_time = stamp.toSec(); // Set up the output filename - form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, sizeof(filename_buffer)); + form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, + sizeof(filename_buffer)); std::string name(filename_buffer); name = output_directory + "/" + name + ".jpg"; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc index 395854e3..0836a161 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.cc +++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc @@ -17,6 +17,8 @@ * under the License. 
*/ +// TODO(oalexan1): Modularize this code and move things to utils + #include #include #include @@ -51,7 +53,7 @@ #include #include -// #include // for reading and writing ply files +#include // for reading and writing ply files #include #include @@ -178,7 +180,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, std::vector const& nav_cam_msgs, boost::shared_ptr sparse_map, bool use_brisk_map, - std::vector const& sparse_map_timestamps, Eigen::MatrixXd& desired_cam_to_world_trans, + std::vector const& sparse_map_timestamps, + Eigen::MatrixXd& desired_cam_to_world_trans, int& sparse_map_id, int& nav_cam_bag_id, int& nav_cam_pos) { // Find nav cam images around the nav cam pose to compute std::vector* nearby_cid_ptr = NULL; @@ -210,7 +213,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const cv::Mat beg_image; bool save_grayscale = true; // Localization needs grayscale images double found_time = -1.0; - if (!dense_map::lookupImage(beg_nav_cam_time, nav_cam_msgs, save_grayscale, beg_image, nav_cam_pos, found_time)) + if (!dense_map::lookupImage(beg_nav_cam_time, nav_cam_msgs, save_grayscale, + beg_image, nav_cam_pos, found_time)) return false; camera::CameraModel beg_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), @@ -225,8 +229,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const // the beg image next time around. 
int end_nav_cam_pos = nav_cam_pos; cv::Mat end_image; - if (!dense_map::lookupImage(end_nav_cam_time, nav_cam_msgs, save_grayscale, end_image, end_nav_cam_pos, - found_time)) + if (!dense_map::lookupImage(end_nav_cam_time, nav_cam_msgs, save_grayscale, end_image, + end_nav_cam_pos, found_time)) return false; camera::CameraModel end_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); @@ -251,15 +255,18 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const return false; } -typedef cv::Vec Vec4d; +// Store x, y, z, intensity, and weight. +typedef cv::Vec Vec5d; // Check if first three coordinates of a vector are 0 -bool zeroXyz(Vec4d const& p) { return (p[0] == 0) && (p[1] == 0) && (p[2] == 0); } +bool zeroXyz(Vec5d const& p) { return (p[0] == 0) && (p[1] == 0) && (p[2] == 0); } // Add a given vertex to the ply file unless already present -inline void add_vertex(Vec4d const& V, std::pair const& pix, - std::map, size_t>& pix_to_vertex, size_t& vertex_count, - std::vector>& vertices, std::vector>& colors) { +inline void add_vertex(Vec5d const& V, std::pair const& pix, // NOLINT + std::map, size_t>& pix_to_vertex, // NOLINT + size_t& vertex_count, // NOLINT + std::vector>& vertices, // NOLINT + std::vector>& colors) { // NOLINT // Do not add the zero vertex if (zeroXyz(V)) return; @@ -276,15 +283,17 @@ inline void add_vertex(Vec4d const& V, std::pair const& pix, vertex_count++; } -double seg_len(Vec4d const& A, Vec4d const& B) { return Eigen::Vector3d(A[0] - B[0], A[1] - B[1], A[2] - B[2]).norm(); } +double seg_len(Vec5d const& A, Vec5d const& B) { + return Eigen::Vector3d(A[0] - B[0], A[1] - B[1], A[2] - B[2]).norm(); // NOLINT +} -void add_dist(Vec4d const& A, Vec4d const& B, std::vector& distances) { +void add_dist(Vec5d const& A, Vec5d const& B, std::vector& distances) { if (!zeroXyz(A) && !zeroXyz(B)) distances.push_back(seg_len(A, B)); } // If the distance between two points is more than this len, 
// add more points in between on the edge connecting them. -void add_points(Vec4d const& A, Vec4d const& B, double min_len, std::vector& points) { +void add_points(Vec5d const& A, Vec5d const& B, double min_len, std::vector& points) { if (zeroXyz(A) || zeroXyz(B)) return; double curr_len = seg_len(A, B); @@ -293,11 +302,13 @@ void add_points(Vec4d const& A, Vec4d const& B, double min_len, std::vector(i) / static_cast(num); - Vec4d C = (1.0 - t) * A + t * B; + Vec5d C = (1.0 - t) * A + t * B; points.push_back(C); } } @@ -313,10 +324,10 @@ double median_mesh_edge(cv::Mat const& depthMat) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); add_dist(UL, UR, distances); add_dist(UL, LL, distances); @@ -338,13 +349,15 @@ double median_mesh_edge(cv::Mat const& depthMat) { // Add points on edges and inside triangles having distances longer // than the median edge length times a factor. Store the combined set // in depthMat by adding further rows to it. 
-void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { +void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { double median_len = median_mesh_edge(depthMat); + if (median_len <= 0) return; // should not happen + // Add points if lengths are more than this double min_len = 1.3 * median_len; - std::vector points; + std::vector points; for (int row = 0; row < depthMat.rows - 1; row++) { for (int col = 0; col < depthMat.cols - 1; col++) { @@ -352,13 +365,13 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); // Add points on each edge - std::vector points1; + std::vector points1; add_points(UL, UR, min_len, points1); add_points(UL, LL, min_len, points1); add_points(UR, LL, min_len, points1); @@ -369,7 +382,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { // Valid face // See if to add the triangle center - Vec4d C = (UL + UR + LL) / 3.0; + Vec5d C = (UL + UR + LL) / 3.0; if (seg_len(UL, C) > min_len || seg_len(UR, C) > min_len || seg_len(LL, C) > min_len) points1.push_back(C); // Add points from triangle center to each point added so far @@ -377,7 +390,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { } // Add points on each edge of this triangle - std::vector points2; + std::vector points2; add_points(UR, LR, min_len, points2); add_points(UR, LL, 
min_len, points2); add_points(LR, LL, min_len, points2); @@ -387,7 +400,7 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { // See if to add the triangle center int curr_len2 = points2.size(); - Vec4d C = (UR + LR + LL) / 3.0; + Vec5d C = (UR + LR + LL) / 3.0; if (seg_len(UR, C) > min_len || seg_len(LR, C) > min_len || seg_len(LL, C) > min_len) points2.push_back(C); // Add points from triangle center to each point added so far @@ -414,12 +427,12 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { depthMat.copyTo(workMat); // Resize depthMat, all info in it is now lost, and have only zeros - depthMat = cv::Mat(workMat.rows + num_new_rows, workMat.cols, CV_64FC4, Zero); + depthMat = cv::Mat_(workMat.rows + num_new_rows, workMat.cols, Zero); // Copy the relevant chunk from workMat for (int row = 0; row < workMat.rows; row++) { for (int col = 0; col < workMat.cols; col++) { - depthMat.at(row, col) = workMat.at(row, col); + depthMat.at(row, col) = workMat.at(row, col); } } @@ -431,20 +444,19 @@ void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero) { if (count >= num_points) break; - depthMat.at(row, col) = points[count]; - depthMat.at(row, col)[3] = -1; // so that the mesh does not have it as a face + depthMat.at(row, col) = points[count]; + depthMat.at(row, col)[3] = -1; // so that the mesh does not have it as a face } // Need to make sure to break the outer loop too if (count >= num_points) break; } - if (count != num_points) LOG(FATAL) << "Bookkeeping error. Did not add all the points to the depth map."; + if (count != num_points) + LOG(FATAL) << "Bookkeeping error. Did not add all the points to the depth map."; } -#if 0 // Turning this off till it is decided if to include happly.h or not - -// Form a mesh. Ignore (x, y, z, i) values with (x, y, z) = (0, 0, 0). +// Form a mesh. Ignore (x, y, z, i, w) values with (x, y, z) = (0, 0, 0). // Add a vertex but not a face if i is negative. 
void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { size_t vertex_count = 0; @@ -459,10 +471,10 @@ void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { std::pair pix_ur = std::make_pair(row + 1, col); std::pair pix_ll = std::make_pair(row, col + 1); std::pair pix_lr = std::make_pair(row + 1, col + 1); - Vec4d UL = depthMat.at(pix_ul.first, pix_ul.second); - Vec4d UR = depthMat.at(pix_ur.first, pix_ur.second); - Vec4d LL = depthMat.at(pix_ll.first, pix_ll.second); - Vec4d LR = depthMat.at(pix_lr.first, pix_lr.second); + Vec5d UL = depthMat.at(pix_ul.first, pix_ul.second); + Vec5d UR = depthMat.at(pix_ur.first, pix_ur.second); + Vec5d LL = depthMat.at(pix_ll.first, pix_ll.second); + Vec5d LR = depthMat.at(pix_lr.first, pix_lr.second); // Add a vertex even though no face has it as a corner add_vertex(UL, pix_ul, pix_to_vertex, vertex_count, vertices, colors); @@ -502,7 +514,6 @@ void save_mesh(cv::Mat const& depthMat, std::string const& plyFileName) { std::cout << "Writing: " << plyFileName << std::endl; ply.write(plyFileName, happly::DataFormat::ASCII); } -#endif // Find the median of a sorted vector // TODO(oalexan1): May need to put here a partial check that the vector is sorted @@ -518,7 +529,7 @@ double find_median(std::vector const& v) { // Throw out as outliers points whose x, y, or z values differs // from the median of such values by more than this distance. // This can be expensive for win >= 25. 
-void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int win, double delta) { +void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero, int win, double delta) { // Copy it to workMat, with the latter not being modified below depthMat.copyTo(workMat); @@ -527,7 +538,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w for (int col = 0; col < workMat.cols; col++) { std::vector dist_x, dist_y, dist_z; - Vec4d pt = workMat.at(row, col); + Vec5d pt = workMat.at(row, col); // Is an outlier already if (zeroXyz(pt)) continue; @@ -538,7 +549,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (row + irow < 0 || row + irow >= workMat.rows) continue; if (col + icol < 0 || col + icol >= workMat.cols) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); if (zeroXyz(curr_pt)) continue; @@ -550,7 +561,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (dist_x.size() < std::max(2, std::min(5, win))) { // so few neighbors, could just toss it out - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } @@ -560,7 +571,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w if (std::abs(find_median(dist_x) - pt[0]) > delta || std::abs(find_median(dist_y) - pt[1]) > delta || std::abs(find_median(dist_z) - pt[2]) > delta) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; } } } @@ -571,7 +582,7 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec4d const& Zero, int w // ray from the existing point to the new point being almost parallel // to existing nearby rays (emanating from camera center) which is not // good. 
-void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Zero, int radius1, int radius2, +void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, int radius2, double foreshortening_delta) { // Copy depthMat to workMat, with the latter unchanged below depthMat.copyTo(workMat); @@ -581,8 +592,8 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z for (int col = 0; col < depthMat.cols; col++) { // Points which do not need to be filled can be skipped // and let them keep their existing value - if (!zeroXyz(workMat.at(row, col))) { - depthMat.at(row, col) = workMat.at(row, col); + if (!zeroXyz(workMat.at(row, col))) { + depthMat.at(row, col) = workMat.at(row, col); continue; } @@ -590,7 +601,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z // A point on an axis counts towards both quadrants it touches. cv::Mat valid(cv::Size(2, 2), CV_8U, cv::Scalar(0)); - Vec4d pt_sum = Zero; + Vec5d pt_sum = Zero; double wt_sum = 0.0; for (int irow = -radius1; irow <= radius1; irow++) { @@ -604,7 +615,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z // Look only within given radius if (dist > static_cast(radius1)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); // Note that these are not exclusive, as points on axes // can be in multiple quadrants @@ -653,11 +664,11 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z if (num_good_quadrants < 4) will_accept = false; if (!will_accept || wt_sum == 0.0) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } - Vec4d new_pt = pt_sum / wt_sum; + Vec5d new_pt = pt_sum / wt_sum; Eigen::Vector3d Z(0, 0, 1); // See if this point results in rays which are too parallel to existing rays @@ -672,7 +683,7 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, 
double sigma, Vec4d const& Z // Check only within given radius if (dist > static_cast(radius2)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); if (zeroXyz(curr_pt)) continue; // Not a valid point @@ -696,19 +707,19 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Z } if (!will_accept) { - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; continue; } // accept - depthMat.at(row, col) = new_pt; + depthMat.at(row, col) = new_pt; } } } // Smooth newly added points and also their immediate neighbors -void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec4d const& Zero, - int radius) { +void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMat, + double sigma, Vec5d const& Zero, int radius) { // Copy depthMat to workMat, with the latter not being changed below. // Note how we use origMat to keep track of points which did not // change recently. 
@@ -725,7 +736,7 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (row + irow < 0 || row + irow >= depthMat.rows) continue; if (col + icol < 0 || col + icol >= depthMat.cols) continue; - if (zeroXyz(origMat.at(row + irow, col + icol))) { + if (zeroXyz(origMat.at(row + irow, col + icol))) { isNew = true; break; } @@ -736,9 +747,9 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (!isNew) continue; // Do not smooth points which have no good value to start with - if (zeroXyz(workMat.at(row, col))) continue; + if (zeroXyz(workMat.at(row, col))) continue; - Vec4d pt_sum = Zero; + Vec5d pt_sum = Zero; double wt_sum = 0.0; for (int irow = -radius; irow <= radius; irow++) { @@ -751,7 +762,7 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa if (dist > static_cast(radius)) continue; - Vec4d curr_pt = workMat.at(row + irow, col + icol); + Vec5d curr_pt = workMat.at(row + irow, col + icol); if (zeroXyz(curr_pt)) continue; // Not a valid point @@ -764,8 +775,37 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa } } - Vec4d new_pt = pt_sum / wt_sum; - depthMat.at(row, col) = new_pt; + Vec5d new_pt = pt_sum / wt_sum; + depthMat.at(row, col) = new_pt; + } + } +} + +// Add weights which are higher at image center and decrease towards +// boundary. This makes voxblox blend better. 
+void calc_weights(cv::Mat& depthMat, double exponent) { + for (int row = 0; row < depthMat.rows; row++) { +#pragma omp parallel for + for (int col = 0; col < depthMat.cols; col++) { + if (zeroXyz(depthMat.at(row, col))) continue; + + double drow = row - depthMat.rows / 2.0; + double dcol = col - depthMat.cols / 2.0; + double dist_sq = drow * drow + dcol * dcol; + + // Some kind of function decaying from center + float weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); + + // Also, points that are further in z are given less weight + double z = depthMat.at(row, col)[2]; + weight *= 1.0 / (0.001 + z * z); + + // Make sure the weight does not get too small as voxblox + // will read it as a float. Here making the weights + // just a little bigger than what voxblox will accept. + weight = std::max(weight, static_cast(1.1e-6)); + + depthMat.at(row, col)[4] = weight; } } } @@ -773,13 +813,15 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat const& haz_cam_intensity, Eigen::Affine3d const& hazcam_depth_to_image_transform, int depth_exclude_columns, int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, - std::vector const& median_filter_params, bool save_debug_data, - const char* filename_buffer, Eigen::MatrixXd const& desired_cam_to_world_trans) { + double reliability_weight_exponent, std::vector const& median_filter_params, + bool save_debug_data, const char* filename_buffer, + Eigen::MatrixXd const& desired_cam_to_world_trans) { // Sanity checks if (static_cast(haz_cam_intensity.cols) != static_cast(pc_msg->width) || static_cast(haz_cam_intensity.rows) != static_cast(pc_msg->height)) LOG(FATAL) << "Depth image dimensions does not agree with point cloud dimensions."; - if (dense_map::matType(haz_cam_intensity) != "8UC1") LOG(FATAL) << "Extracted depth image should be of type: 8UC1."; + if (dense_map::matType(haz_cam_intensity) != 
"8UC3") + LOG(FATAL) << "Extracted depth image should be of type 8UC3."; // Extract the depth point cloud from the message pcl::PointCloud pc; @@ -789,11 +831,13 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Two constants float inf = std::numeric_limits::infinity(); - Vec4d Zero(0, 0, 0, 0); + Vec5d Zero(0, 0, 0, 0, 0); - // A matrix storing the depth cloud and a temporary work matrix - cv::Mat depthMat(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_64FC4, Zero); - cv::Mat workMat(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_64FC4, Zero); + // A matrix storing the depth cloud and a temporary work matrix. + // These have 5 channels: x, y, z, intensity, and weight. The weight + // determines the reliability of a point. + cv::Mat_ depthMat(haz_cam_intensity.rows, haz_cam_intensity.cols, Zero); + cv::Mat_ workMat(haz_cam_intensity.rows, haz_cam_intensity.cols, Zero); // Populate depthMat int count = -1; @@ -803,15 +847,18 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co double x = pc.points[count].x; double y = pc.points[count].y; double z = pc.points[count].z; - double i = haz_cam_intensity.at(row, col); + + // Pick first channel. The precise color of the depth cloud is not important. + // It will be wiped later during smoothing and hole-filling anyway. 
+ double i = haz_cam_intensity.at(row, col)[0]; bool skip = (col < depth_exclude_columns || depthMat.cols - col < depth_exclude_columns || row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); if ((x == 0 && y == 0 && z == 0) || skip) - depthMat.at(row, col) = Zero; + depthMat.at(row, col) = Zero; else - depthMat.at(row, col) = Vec4d(x, y, z, i); + depthMat.at(row, col) = Vec5d(x, y, z, i, 0); } } @@ -838,20 +885,22 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co int radius = depth_hole_fill_diameter / 4.0; if (depth_hole_fill_diameter > 0) smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); + // This is the right place at which to add weights, before adding + // extra points messes up with the number of rows in in depthMat. + calc_weights(depthMat, reliability_weight_exponent); + // Add extra points, but only if we are committed to manipulating the // depth cloud to start with if (depth_hole_fill_diameter > 0) add_extra_pts(depthMat, workMat, Zero); -#if 0 if (save_debug_data) { // Save the updated depthMat as a mesh (for debugging) std::string plyFileName = filename_buffer + std::string(".ply"); save_mesh(depthMat, plyFileName); } -#endif // Initialize point cloud that we will pass to voxblox - pcl::PointCloud pci; + pcl::PointCloud pci; pci.width = depthMat.cols; pci.height = depthMat.rows; pci.points.resize(pci.width * pci.height); @@ -861,7 +910,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co for (int col = 0; col < depthMat.cols; col++) { count++; - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); // An outlier if (zeroXyz(pt)) { @@ -869,7 +918,10 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pci.points[count].x = inf; pci.points[count].y = inf; pci.points[count].z = inf; - pci.points[count].intensity = 0; + pci.points[count].normal_x = 0; // intensity + pci.points[count].normal_y = 0; // weight + 
pci.points[count].normal_z = 0; // ensure initialization + pci.points[count].curvature = 0; // ensure initialization continue; } @@ -880,8 +932,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pci.points[count].z = X[2]; if (pt[3] <= 0) pt[3] = 100; // Ensure a positive value for the color + if (pt[4] < 0) pt[4] = 0; // Ensure a non-negative weight - pci.points[count].intensity = pt[3]; + pci.points[count].normal_x = pt[3]; // intensity + pci.points[count].normal_y = pt[4]; // weight + pci.points[count].normal_z = 0; // ensure initialization + pci.points[count].curvature = 0; // ensure initialization } } @@ -890,14 +946,20 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co pcl::io::savePCDFileASCII(filename_buffer, pci); if (save_debug_data) { - // Find the image of distances in depthMat, for debugging. Note + // Find the image of distances in depthMat, for debugging. Note // that we ignore the extra points we added as new rows depthMat in // add_extra_pts(). + + // The depthMat may have extra rows, but we assume the number of columns + // did not change. + if (haz_cam_intensity.cols != depthMat.cols) + LOG(FATAL) << "Incorrect number of columns in depthMat\n"; + cv::Mat depthDist(haz_cam_intensity.rows, haz_cam_intensity.cols, CV_32FC1, cv::Scalar(0)); count = -1; for (int row = 0; row < depthDist.rows; row++) { for (int col = 0; col < depthDist.cols; col++) { - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); Eigen::Vector3d eigen_pt(pt[0], pt[1], pt[2]); double dist = eigen_pt.norm(); depthDist.at(row, col) = dist; @@ -930,11 +992,10 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co std::cout << "Writing: " << fileName << std::endl; cv::imwrite(fileName, 255.0 * (depthDist - min_dist) / (max_dist - min_dist)); -#if 0 // Transform (modifies depthMat) and save the transformed mesh. For debugging. 
for (int row = 0; row < depthMat.rows; row++) { for (int col = 0; col < depthMat.cols; col++) { - Vec4d pt = depthMat.at(row, col); + Vec5d pt = depthMat.at(row, col); if (zeroXyz(pt)) continue; @@ -949,24 +1010,21 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co V = desired_cam_to_world_trans * V; for (size_t it = 0; it < 3; it++) - depthMat.at(row, col)[it] = V[it]; + depthMat.at(row, col)[it] = V[it]; } } std::string plyFileName = filename_buffer + std::string("_trans.ply"); save_mesh(depthMat, plyFileName); -#endif } } - -#if 0 - // This is an instructive code to add to saveCameraPoses() // to compare the sci cam pose as found via localizing // the nav cam images vs localizing the sci cam image directly. // The conclusion is that the sci cam calibration needs // improvement, as the results are all over the place. +#if 0 void localize_sci_cam_directly() { std::cout.precision(8); while (cam_type == "sci_cam" && !use_brisk_map) { @@ -1072,573 +1130,612 @@ void localize_sci_cam_directly() { } #endif - // Given a desired camera and a set of acquisition timestamps for it, - // find the nav cam images nearest in time bracketing each timestamp, - // find the poses of those images using localization, interpolate the - // nav cam pose at the desired timestamp, and apply the transform - // between the nav cam and the desired camera to find the pose of the - // desired camera (by which it is meant the transform from the desired - // camera to the world). - - // Also write the extracted point clouds and the desired cam (nav or sci) images. 
- - void saveCameraPoses(bool simulated_data, std::string const& cam_type, - double desired_cam_to_nav_cam_offset, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, - Eigen::Affine3d& hazcam_depth_to_image_transform, - int depth_exclude_columns, int depth_exclude_rows, - double foreshortening_delta, double depth_hole_fill_diameter, - std::vector const& median_filter_params, - bool save_debug_data, - std::vector const& nav_cam_msgs, - std::vector const& sci_cam_msgs, - std::vector const& haz_cam_points_msgs, - std::vector const& haz_cam_intensity_msgs, - std::vector const& nav_cam_bag_timestamps, - std::map> const& sci_cam_exif, - std::string const& output_dir, std::string const& desired_cam_dir, - double start, double duration, - double sampling_spacing_seconds, - double dist_between_processed_cams, std::set const& sci_cam_timestamps, - double max_iso_times_exposure, - boost::shared_ptr sparse_map, bool use_brisk_map, - dense_map::StampedPoseStorage const& sim_desired_cam_poses, - std::string const& external_mesh) { - double min_map_timestamp = std::numeric_limits::max(); - double max_map_timestamp = -min_map_timestamp; - std::vector sparse_map_timestamps; - if (!simulated_data && !use_brisk_map) { - // Find the minimum and maximum timestamps in the sparse map - const std::vector& images = sparse_map->cid_to_filename_; - sparse_map_timestamps.resize(images.size()); - for (size_t cid = 0; cid < images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(images[cid]); - sparse_map_timestamps[cid] = timestamp; - min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); - max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]); - } +// Given a desired camera and a set of acquisition timestamps for it, +// find the nav cam images nearest in time bracketing each timestamp, +// find the poses of those images using localization, interpolate the +// nav cam pose at the desired timestamp, and apply the transform 
+// between the nav cam and the desired camera to find the pose of the +// desired camera (by which it is meant the transform from the desired +// camera to the world). + +// Also write the extracted point clouds and the desired cam (nav or sci) images. + +void saveCameraPoses(bool simulated_data, std::string const& cam_type, + double desired_cam_to_nav_cam_offset, + Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, + Eigen::Affine3d& hazcam_depth_to_image_transform, + int depth_exclude_columns, int depth_exclude_rows, + double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, + std::vector const& median_filter_params, + bool save_debug_data, + std::vector const& nav_cam_msgs, + std::vector const& sci_cam_msgs, + std::vector const& haz_cam_points_msgs, + std::vector const& haz_cam_intensity_msgs, + std::vector const& nav_cam_bag_timestamps, + std::map> const& sci_cam_exif, + std::string const& output_dir, std::string const& desired_cam_dir, + double start, double duration, + double sampling_spacing_seconds, + double dist_between_processed_cams, std::set const& sci_cam_timestamps, + double max_iso_times_exposure, + boost::shared_ptr sparse_map, bool use_brisk_map, + dense_map::StampedPoseStorage const& sim_desired_cam_poses, + std::string const& external_mesh) { + double min_map_timestamp = std::numeric_limits::max(); + double max_map_timestamp = -min_map_timestamp; + std::vector sparse_map_timestamps; + if (!simulated_data && !use_brisk_map) { + // Find the minimum and maximum timestamps in the sparse map + const std::vector& images = sparse_map->cid_to_filename_; + sparse_map_timestamps.resize(images.size()); + for (size_t cid = 0; cid < images.size(); cid++) { + double timestamp = dense_map::fileNameToTimestamp(images[cid]); + sparse_map_timestamps[cid] = timestamp; + min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); + max_map_timestamp = std::max(max_map_timestamp, 
sparse_map_timestamps[cid]); } + } - // Open a handle for writing the index of files being written to disk - std::string index_file = output_dir + "/" + cam_type + "_index.txt"; - std::ofstream ofs(index_file.c_str()); - - // Keep track of where in the bags we are. - int desired_cam_pos = 0; // can be either nav, sci, or haz cam intensity position - int nav_cam_pos = 0; // track separately the nav cam position as well - - // These two are camera ids in the map used for different - // purposes. We increment them as we travel forward in time. - int sparse_map_id = 0, nav_cam_bag_id = 0; - - // These are used to track time as the bag is traversed - double beg_time = -1.0, prev_time = -1.0; - - // This is used to ensure cameras are not too close - Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); - - // Decide on which messages to process - std::vector const* desired_cam_msgs = NULL; - if (cam_type == "haz_cam") { - desired_cam_msgs = &haz_cam_points_msgs; // points topic - } else if (cam_type == "sci_cam") { - desired_cam_msgs = &sci_cam_msgs; // image topic - } else if (cam_type == "nav_cam") { - desired_cam_msgs = &nav_cam_msgs; // image topic - } else { - LOG(FATAL) << "Unknown camera type: " << cam_type; - } + // Open a handle for writing the index of files being written to disk + std::string index_file = output_dir + "/" + cam_type + "_index.txt"; + std::ofstream ofs(index_file.c_str()); + + // For the haz cam, need an image index, which will be haz_cam_index.txt, + // and a point cloud index, which will be depth_cam_index.txt. + std::string depth_index_file = output_dir + "/" + "depth_cam" + "_index.txt"; + std::ofstream depth_ofs; + if (cam_type == "haz_cam") depth_ofs = std::ofstream(depth_index_file.c_str()); + + // Keep track of where in the bags we are. 
+ int desired_cam_pos = 0; // can be either nav, sci, or haz cam intensity position + int nav_cam_pos = 0; // track separately the nav cam position as well + + // These two are camera ids in the map used for different + // purposes. We increment them as we travel forward in time. + int sparse_map_id = 0, nav_cam_bag_id = 0; + + // These are used to track time as the bag is traversed + double beg_time = -1.0, prev_time = -1.0; + + // This is used to ensure cameras are not too close + Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); + + // Decide on which messages to process + std::vector const* desired_cam_msgs = NULL; + if (cam_type == "haz_cam") { + desired_cam_msgs = &haz_cam_points_msgs; // points topic + } else if (cam_type == "sci_cam") { + desired_cam_msgs = &sci_cam_msgs; // image topic + } else if (cam_type == "nav_cam") { + desired_cam_msgs = &nav_cam_msgs; // image topic + } else { + LOG(FATAL) << "Unknown camera type: " << cam_type; + } - if (external_mesh == "") { - // Compute the starting bag time as the timestamp of the first cloud - for (auto& m : haz_cam_points_msgs) { - if (beg_time < 0) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - pc_msg = m.instantiate(); - if (pc_msg) { - beg_time = pc_msg->header.stamp.toSec(); - break; - } + if (external_mesh == "") { + // Compute the starting bag time as the timestamp of the first cloud + for (auto& m : haz_cam_points_msgs) { + if (beg_time < 0) { + sensor_msgs::PointCloud2::ConstPtr pc_msg; + pc_msg = m.instantiate(); + if (pc_msg) { + beg_time = pc_msg->header.stamp.toSec(); + break; } } - } else { - // Use an external mesh, so ignore the depth clouds (may even be absent) - beg_time = min_map_timestamp; } + } else { + // Use an external mesh, so ignore the depth clouds (may even be absent) + beg_time = min_map_timestamp; + } - for (auto& m : *desired_cam_msgs) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - double curr_time = -1.0; + for (auto& m : *desired_cam_msgs) { + 
sensor_msgs::PointCloud2::ConstPtr pc_msg; + double curr_time = -1.0; - if (cam_type == "haz_cam") { - // haz cam - pc_msg = m.instantiate(); - if (!pc_msg) continue; - curr_time = pc_msg->header.stamp.toSec(); + if (cam_type == "haz_cam") { + // haz cam + pc_msg = m.instantiate(); + if (!pc_msg) continue; + curr_time = pc_msg->header.stamp.toSec(); + } else { + // sci cam or nav cam + // Check for image + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + curr_time = image_msg->header.stamp.toSec(); } else { - // sci cam or nav cam - // Check for image - sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); - if (image_msg) { - curr_time = image_msg->header.stamp.toSec(); + // Check for compressed image + sensor_msgs::CompressedImage::ConstPtr comp_image_msg + = m.instantiate(); + if (comp_image_msg) { + curr_time = comp_image_msg->header.stamp.toSec(); } else { - // Check for compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg = m.instantiate(); - if (comp_image_msg) { - curr_time = comp_image_msg->header.stamp.toSec(); - } else { - // Not an image or compressed image topic - continue; - } + // Not an image or compressed image topic + continue; } } + } - bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); - if (!simulated_data && !use_brisk_map && !custom_timestamps) { - if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts - if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends - } - - if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time - - if (duration > 0 && curr_time - beg_time > start + duration) break; // past the desired ending time - - // The pose we will solve for - bool lookup_success = false; - Eigen::MatrixXd desired_cam_to_world_trans; + bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); + if (!simulated_data && 
!use_brisk_map && !custom_timestamps) { + if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts + if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends + } - // Let some time elapse between fusing clouds - if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_timestamps) { - continue; - } + if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time - // The case of custom timestamps - if (custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; - - if (!simulated_data) { - // Do localization based on nav cam, transform the pose to desired cam coordinates, etc - // Note that we convert from the desired time to the nav cam time for this operation - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, - desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, use_brisk_map, - sparse_map_timestamps, desired_cam_to_world_trans, sparse_map_id, nav_cam_bag_id, - nav_cam_pos)) { - std::cout.precision(17); - std::cout << "Localization failed at time " << curr_time << std::endl; - std::cout << "if too many such failures, consider modifying the value of " - << "--localization_options. See the documentation for more info." << std::endl; - continue; - } - } else { - // In this case the logic is much simpler. We already have the - // desired camera poses. Yet, the poses and images for a given - // camera need not be acquired at the same time, even in - // simulation, so, do interpolation. - Eigen::Affine3d out_pose; - double max_gap = 1.0e+10; // hoping the simulator is functioning well - if (!sim_desired_cam_poses.interpPose(curr_time, max_gap, out_pose)) continue; - desired_cam_to_world_trans = out_pose.matrix(); - } + if (duration > 0 && curr_time - beg_time > start + duration) + break; // past the desired ending time - // Success localizing. 
Update the time at which it took place for next time. - prev_time = curr_time; + // The pose we will solve for + bool lookup_success = false; + Eigen::MatrixXd desired_cam_to_world_trans; - std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + // Let some time elapse between fusing clouds + if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_timestamps) { + continue; + } - // See if to skip some images if the robot did not move much - Eigen::Affine3d curr_trans; - curr_trans.matrix() = desired_cam_to_world_trans; - Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); - double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); - if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && curr_dist_bw_cams < dist_between_processed_cams) { - std::cout << "Distance to previously processed camera is " << curr_dist_bw_cams << ", skipping it." - << std::endl; + // The case of custom timestamps + if (custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; + + if (!simulated_data) { + // Do localization based on nav cam, transform the pose to desired cam coordinates, etc + // Note that we convert from the desired time to the nav cam time for this operation + if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, + desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, + use_brisk_map, + sparse_map_timestamps, desired_cam_to_world_trans, + sparse_map_id, nav_cam_bag_id, + nav_cam_pos)) { + std::cout.precision(17); + std::cout << "Localization failed at time " << curr_time << std::endl; + std::cout << "if too many such failures, consider modifying the value of " + << "--localization_options. See the documentation for more info." << std::endl; continue; } + } else { + // In this case the logic is much simpler. We already have the + // desired camera poses. 
Yet, the poses and images for a given + // camera need not be acquired at the same time, even in + // simulation, so, do interpolation. + Eigen::Affine3d out_pose; + double max_gap = 1.0e+10; // hoping the simulator is functioning well + if (!sim_desired_cam_poses.interpPose(curr_time, max_gap, out_pose)) continue; + desired_cam_to_world_trans = out_pose.matrix(); + } - char filename_buffer[1000]; - std::string suffix = cam_type + "_to_world"; - - if (cam_type == "haz_cam") { - cv::Mat haz_cam_intensity; - bool save_grayscale = true; - - // Normally try to look up the image time right after the current cloud time - double desired_image_time = curr_time; - double found_time = -1.0; - if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, save_grayscale, haz_cam_intensity, - desired_cam_pos, found_time)) - continue; + // Success localizing. Update the time at which it took place for next time. + prev_time = curr_time; + + std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + + // See if to skip some images if the robot did not move much + Eigen::Affine3d curr_trans; + curr_trans.matrix() = desired_cam_to_world_trans; + Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); + double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); + if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && + curr_dist_bw_cams < dist_between_processed_cams) { + std::cout << "Distance to previously processed camera is " + << curr_dist_bw_cams << ", skipping it." 
+ << std::endl; + continue; + } - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), curr_time, - suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + char filename_buffer[1000]; + std::string suffix = cam_type + "_to_world"; - // Save the transform name in the index file - ofs << filename_buffer << "\n"; + if (cam_type == "haz_cam") { + cv::Mat haz_cam_intensity; + bool save_grayscale = false; // Get a color image, if possible - // Save the cloud - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", output_dir.c_str(), curr_time); + // Normally try to look up the image time right after the current cloud time + double desired_image_time = curr_time; + double found_time = -1.0; + if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, + save_grayscale, haz_cam_intensity, + desired_cam_pos, found_time)) + continue; - // Append the intensity to the point cloud and save it - save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, depth_exclude_columns, - depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, median_filter_params, - save_debug_data, filename_buffer, desired_cam_to_world_trans); + // Save the transform (only if the above lookup was successful) + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), + curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + // Save the transform name in the index file + depth_ofs << filename_buffer << "\n"; // for the depth + + // Append the intensity to the point cloud and save it + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", + output_dir.c_str(), curr_time); + save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, + depth_exclude_columns, + depth_exclude_rows, 
foreshortening_delta, depth_hole_fill_diameter, + reliability_weight_exponent, median_filter_params, + save_debug_data, filename_buffer, + desired_cam_to_world_trans); + + // Save the name of the point cloud to the index + depth_ofs << filename_buffer << "\n"; + + // Save the image + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, haz_cam_intensity); - // Save the name of the point cloud to the index - ofs << filename_buffer << "\n"; + // Save the name of the image in the index + ofs << filename_buffer << "\n"; - } else { - // Save the nav or sci cam image at the given timestamp. - cv::Mat desired_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, desired_cam_image, desired_cam_pos, - found_time)) - continue; + } else { + // Save the nav or sci cam image at the given timestamp. + cv::Mat desired_cam_image; + bool save_grayscale = false; + double found_time = -1.0; + if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, + desired_cam_image, desired_cam_pos, found_time)) + continue; - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), curr_time, - suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); - - // This is very useful for debugging things with ASP. - - // A debug utility for saving a camera in format ASP understands - // if (cam_type == "sci_cam") - // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); - - // Apply an optional scale. Use a pointer to not copy the data more than - // one has to. 
- cv::Mat* img_ptr = &desired_cam_image; - cv::Mat scaled_image; - if (!simulated_data && cam_type == "sci_cam") { - auto exif_it = sci_cam_exif.find(curr_time); - if (exif_it != sci_cam_exif.end()) { - std::vector const& exif = exif_it->second; - double iso = exif[dense_map::ISO]; - double exposure = exif[dense_map::EXPOSURE_TIME]; - dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, *img_ptr, scaled_image); - img_ptr = &scaled_image; - } + // Save the transform (only if the above lookup was successful) + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", + output_dir.c_str(), curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + // This is very useful for debugging things with ASP. + + // A debug utility for saving a camera in format ASP understands + // if (cam_type == "sci_cam") + // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); + + // Apply an optional scale. Use a pointer to not copy the data more than + // one has to. 
+ cv::Mat* img_ptr = &desired_cam_image; + cv::Mat scaled_image; + if (!simulated_data && cam_type == "sci_cam") { + auto exif_it = sci_cam_exif.find(curr_time); + if (exif_it != sci_cam_exif.end()) { + std::vector const& exif = exif_it->second; + double iso = exif[dense_map::ISO]; + double exposure = exif[dense_map::EXPOSURE_TIME]; + dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, + *img_ptr, scaled_image); + img_ptr = &scaled_image; } - - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, *img_ptr); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; } - // Update the previous camera center - prev_cam_ctr = curr_cam_ctr; + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, *img_ptr); + + // Save the name of the image in the index + ofs << filename_buffer << "\n"; } - std::cout << "Wrote: " << index_file << std::endl; + // Update the previous camera center + prev_cam_ctr = curr_cam_ctr; } - void save_navcam_poses_and_images(boost::shared_ptr sparse_map, - std::vector const& nav_cam_msgs, - std::string const& output_dir, std::string const& nav_cam_dir) { - // Keep track of where in the bag we are - int nav_cam_pos = 0; + std::cout << "Wrote: " << index_file << std::endl; +} - std::string index_file = output_dir + "/nav_cam_index.txt"; +void save_navcam_poses_and_images(boost::shared_ptr sparse_map, + std::vector const& nav_cam_msgs, + std::string const& output_dir, std::string const& nav_cam_dir) { + // Keep track of where in the bag we are + int nav_cam_pos = 0; - std::ofstream ofs(index_file.c_str()); + std::string index_file = output_dir + "/nav_cam_index.txt"; - char filename_buffer[1000]; + std::ofstream 
ofs(index_file.c_str()); - for (unsigned int cid = 0; cid < sparse_map->cid_to_filename_.size(); cid++) { - std::string img_file = sparse_map->cid_to_filename_[cid]; + char filename_buffer[1000]; - double timestamp = dense_map::fileNameToTimestamp(img_file); + for (unsigned int cid = 0; cid < sparse_map->cid_to_filename_.size(); cid++) { + std::string img_file = sparse_map->cid_to_filename_[cid]; - std::string ext = ff_common::file_extension(img_file); - std::string cam_file = ff_common::ReplaceInStr(img_file, "." + ext, "_nav_cam_to_world.txt"); - cam_file = output_dir + "/" + ff_common::basename(cam_file); - if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; + double timestamp = dense_map::fileNameToTimestamp(img_file); - dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); + std::string ext = ff_common::file_extension(img_file); + std::string cam_file = ff_common::ReplaceInStr(img_file, "." + ext, "_nav_cam_to_world.txt"); + cam_file = output_dir + "/" + ff_common::basename(cam_file); + if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; - cv::Mat nav_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(timestamp, nav_cam_msgs, save_grayscale, nav_cam_image, nav_cam_pos, found_time)) - continue; + dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, nav_cam_image); + cv::Mat nav_cam_image; + bool save_grayscale = false; // read color images, if found + double found_time = -1.0; + if (!dense_map::lookupImage(timestamp, nav_cam_msgs, save_grayscale, nav_cam_image, + nav_cam_pos, found_time)) + continue; - ofs << filename_buffer << "\n"; - } - } + 
snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, nav_cam_image); - DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); - DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); - DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); - DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); - DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); - DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); - DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); - DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); - DEFINE_string(camera_type, "all", - "Specify which cameras to process. By default, process all. Options: " - "nav_cam, sci_cam."); - DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); - DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); - DEFINE_double(sampling_spacing_seconds, 0.5, - "Spacing to use, in seconds, between consecutive depth images in " - "the bag that are processed."); - DEFINE_double(dist_between_processed_cams, 0.1, - "Once an image or depth image is processed, how far the camera " - "should move (in meters) before it should process more data."); - DEFINE_string(sci_cam_timestamps, "", - "Process only these sci cam timestamps (rather than " - "any in the bag using --dist_between_processed_cams, etc.). 
Must be " - "a file with one timestamp per line."); - DEFINE_double(max_iso_times_exposure, 5.1, - "Apply the inverse gamma transform to " - "images, multiply them by max_iso_times_exposure/ISO/exposure_time " - "to adjust for lightning differences, then apply the gamma " - "transform back. This value should be set to the maximum observed " - "ISO * exposure_time. The default is 5.1. Not used with simulated data."); - DEFINE_int32(depth_exclude_columns, 0, - "Remove this many columns of data from the rectangular depth image sensor " - "at margins to avoid some distortion of that data."); - DEFINE_int32(depth_exclude_rows, 0, - "Remove this many rows of data from the rectangular depth image sensor " - "at margins to avoid some distortion of that data."); - DEFINE_double(foreshortening_delta, 5.0, - "A smaller value here will result in holes in depth images being filled more " - "aggressively but potentially with more artifacts in foreshortened regions."); - DEFINE_string(median_filters, "7 0.1 25 0.1", - "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " - "if it differs, in the Manhattan norm, from the median of cloud points " - "in the pixel window of size wi centered at it by more than di. This " - "removes points sticking out for each such i."); - DEFINE_double(depth_hole_fill_diameter, 30.0, - "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " - "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " - "later (--max_hole_diameter)."); - DEFINE_bool(simulated_data, false, - "If specified, use data recorded in simulation. " - "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); - DEFINE_bool(use_brisk_map, false, - "If specified, instead of a SURF sparse map made from the same bag that needs " - "texturing, use a pre-existing and unrelated BRISK map. 
This map may be more " - "convenient but less reliable."); - DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); - DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " - "file with this value."); - DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging."); - - int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; - - if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; - if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; - - if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; - - if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) - LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map; - - // Parse the median filter params - double val; - std::vector median_filter_params; - std::istringstream ifs(FLAGS_median_filters); - while (ifs >> val) median_filter_params.push_back(val); - - if (median_filter_params.size() % 2 != 0) - LOG(FATAL) << "There must exist an even number of median filter parameters, " - << "being pairs of window sizes and distances."; - - // Need high precision to print timestamps - std::cout.precision(17); - - // Read simulated poses - dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; - if (FLAGS_simulated_data) { - std::string haz_cam_pose_topic = std::string("/") + TOPIC_HAZ_CAM_SIM_POSE; - std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; - std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; - std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; - 
dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); - dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); - } + ofs << filename_buffer << "\n"; + } +} - // Set up handles for reading data at given time stamp without - // searching through the whole bag each time. - dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); - dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); - dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); - - std::vector nav_cam_bag_timestamps; - if (!FLAGS_simulated_data) - dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); - - std::map> sci_cam_exif; - if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif); - - // How many columns to exclude from the left and the right ends of - // the haz camera depth cloud (this is needed because that cloud has - // some distortion). 
- std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " << FLAGS_depth_exclude_rows - << " rows at the margins of the depth point clouds.\n"; - - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - if (!FLAGS_simulated_data) { - // Read a bunch of transforms from the robot calibration file. - // The empirically determined offset between the measured timestamps - // of the cameras, in seconds. There are two reasons for this time offset: - // (1) The nav cam and sci cam are acquired on different processors. - // (2) The actual moment the camera image is recorded is not the same - // moment that image finished processing and the timestamp is set. - // This delay is worse for the sci cam which takes longer to acquire. - // TODO(oalexan1): camera_calibrator must actually solve for them. 
- dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { - double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; - } +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); +DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); +DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); +DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); +DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); +DEFINE_string(camera_type, "all", + "Specify which cameras to process. By default, process all. 
Options: " + "nav_cam, sci_cam."); +DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); +DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); +DEFINE_double(sampling_spacing_seconds, 0.5, + "Spacing to use, in seconds, between consecutive depth images in " + "the bag that are processed."); +DEFINE_double(dist_between_processed_cams, 0.1, + "Once an image or depth image is processed, how far the camera " + "should move (in meters) before it should process more data."); +DEFINE_string(sci_cam_timestamps, "", + "Process only these sci cam timestamps (rather than " + "any in the bag using --dist_between_processed_cams, etc.). Must be " + "a file with one timestamp per line."); +DEFINE_double(max_iso_times_exposure, 5.1, + "Apply the inverse gamma transform to " + "images, multiply them by max_iso_times_exposure/ISO/exposure_time " + "to adjust for lightning differences, then apply the gamma " + "transform back. This value should be set to the maximum observed " + "ISO * exposure_time. The default is 5.1. Not used with simulated data."); +DEFINE_int32(depth_exclude_columns, 0, + "Remove this many columns of data from the rectangular depth image sensor " + "at margins to avoid some distortion of that data."); +DEFINE_int32(depth_exclude_rows, 0, + "Remove this many rows of data from the rectangular depth image sensor " + "at margins to avoid some distortion of that data."); +DEFINE_double(foreshortening_delta, 5.0, + "A smaller value here will result in holes in depth images being filled more " + "aggressively but potentially with more artifacts in foreshortened regions."); +DEFINE_string(median_filters, "7 0.1 25 0.1", + "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " + "if it differs, in the Manhattan norm, from the median of cloud points " + "in the pixel window of size wi centered at it by more than di. 
This " + "removes points sticking out for each such i."); +DEFINE_double(depth_hole_fill_diameter, 30.0, + "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " + "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " + "later (--max_hole_diameter)."); +DEFINE_double(reliability_weight_exponent, 2.0, + "A larger value will give more weight to depth points corresponding to " + "pixels closer to depth image center, which are considered more reliable."); +DEFINE_bool(simulated_data, false, + "If specified, use data recorded in simulation. " + "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); +DEFINE_bool(use_brisk_map, false, + "If specified, instead of a SURF sparse map made from the same bag that needs " + "texturing, use a pre-existing and unrelated BRISK map. This map may be more " + "convenient but less reliable."); +DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); +DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", + "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "file with this value."); +DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging."); + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; + + if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; + if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; + + if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; + + if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) + LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map; 
+ + // Parse the median filter params + double val; + std::vector median_filter_params; + std::istringstream ifs(FLAGS_median_filters); + while (ifs >> val) median_filter_params.push_back(val); + + if (median_filter_params.size() % 2 != 0) + LOG(FATAL) << "There must exist an even number of median filter parameters, " + << "being pairs of window sizes and distances."; + + // Need high precision to print timestamps + std::cout.precision(17); + + // Read simulated poses + dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; + if (FLAGS_simulated_data) { + std::string haz_cam_pose_topic = std::string("/") + TOPIC_HAZ_CAM_SIM_POSE; + std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; + std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; + std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; + dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); + dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); + } - scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + // Set up handles for reading data at given time stamp without + // searching through the whole bag each time. 
+ dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); + dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); + dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); + dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); + dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); + + std::vector nav_cam_bag_timestamps; + if (!FLAGS_simulated_data) + dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); + + std::map> sci_cam_exif; + if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif); + + // How many columns to exclude from the left and the right ends of + // the haz camera depth cloud (this is needed because that cloud has + // some distortion). + std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " + << FLAGS_depth_exclude_rows << " rows at the margins of the depth point clouds.\n"; + + double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; + Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); + Eigen::Affine3d hazcam_depth_to_image_transform; + hazcam_depth_to_image_transform.setIdentity(); // default value + camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + if (!FLAGS_simulated_data) { 
+ // Read a bunch of transforms from the robot calibration file. + // The empirically determined offset between the measured timestamps + // of the cameras, in seconds. There are two reasons for this time offset: + // (1) The nav cam and sci cam are acquired on different processors. + // (2) The actual moment the camera image is recorded is not the same + // moment that image finished processing and the timestamp is set. + // This delay is worse for the sci cam which takes longer to acquire. + // TODO(oalexan1): camera_calibrator must actually solve for them. + dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", + "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", + "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, + scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, + navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, + sci_cam_params); + + if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { + double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); + std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset + << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; + scicam_to_hazcam_timestamp_offset = new_val; } - boost::shared_ptr sparse_map; - if (!FLAGS_simulated_data) { - std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; - sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - if (!FLAGS_use_brisk_map && - (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { - LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; - } - if (FLAGS_use_brisk_map && - (sparse_map->GetDetectorName() != "ORGBRISK" || sparse_map->vocab_db_.binary_db == NULL)) { - LOG(FATAL) << "An ORGBRISK map with a vocab db is expected 
with --use_brisk_map."; - } - } + scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; + std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; + std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; + std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; + std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; + std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; + std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + } - // A very important sanity check. Not how we use gflags programatically - // to access the value of --histogram_equalization as a string encoding a bool - // ("true" or "false"). Here we ensure that the same flag for histogram - // equalization is used in the map and for localization. - std::string histogram_equalization; - if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { - if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || - (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) - LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " - << "to use for localization are incompatible."; + boost::shared_ptr sparse_map; + if (!FLAGS_simulated_data) { + std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; + sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + if (!FLAGS_use_brisk_map && + (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { + LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; } - - // If desired to process only specific timestamps - std::set sci_cam_timestamps; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - 
double val; - while (ifs >> val) sci_cam_timestamps.insert(val); + if (FLAGS_use_brisk_map && + (sparse_map->GetDetectorName() != "ORGBRISK" || sparse_map->vocab_db_.binary_db == NULL)) { + LOG(FATAL) << "An ORGBRISK map with a vocab db is expected with --use_brisk_map."; } + } - dense_map::createDir(FLAGS_output_dir); + // A very important sanity check. Not how we use gflags programatically + // to access the value of --histogram_equalization as a string encoding a bool + // ("true" or "false"). Here we ensure that the same flag for histogram + // equalization is used in the map and for localization. + std::string histogram_equalization; + if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { + if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || + (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) + LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " + << "to use for localization are incompatible."; + } - // Store here the sci cam and nav cam images whose poses we save - std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; - std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; - dense_map::createDir(sci_cam_dir); - dense_map::createDir(nav_cam_dir); + // If desired to process only specific timestamps + std::set sci_cam_timestamps; + if (FLAGS_sci_cam_timestamps != "") { + std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); + double val; + while (ifs >> val) sci_cam_timestamps.insert(val); + } - // Save haz cam poses and haz cam clouds - if (FLAGS_external_mesh == "") { - std::string cam_type = "haz_cam"; - // haz to nav offset - double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - 
FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, "", // not needed - FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, - sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, - sim_haz_cam_poses, FLAGS_external_mesh); - } + dense_map::createDir(FLAGS_output_dir); + + // Store here the sci cam and nav cam images whose poses we save + std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; + std::string haz_cam_dir = FLAGS_output_dir + "/distorted_haz_cam"; + std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; + dense_map::createDir(nav_cam_dir); + dense_map::createDir(haz_cam_dir); + dense_map::createDir(sci_cam_dir); + + // Save haz cam poses and haz cam clouds + if (FLAGS_external_mesh == "") { + std::string cam_type = "haz_cam"; + // haz to nav offset + double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, + hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, + FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, + haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, + sci_cam_exif, FLAGS_output_dir, haz_cam_dir, + FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, + sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, + sim_haz_cam_poses, FLAGS_external_mesh); + } - // Save sci cam poses and sci cam images - if 
(FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { - std::string cam_type = "sci_cam"; - // sci to nav offset - double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, + // Save sci cam poses and sci cam images + if (FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { + std::string cam_type = "sci_cam"; + // sci to nav offset + double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, + hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, + FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, + haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, + sci_cam_exif, FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, + FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, + FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, + FLAGS_external_mesh); + + // With simulated data we won't perform an additional undistortion + // step which would save the undistorted intrinsics. Hence need to + // do that here. + if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); + } + // Save nav cam poses and nav cam images. This does not work for simulated data. + if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { + if (FLAGS_use_brisk_map) { + // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. 
+ std::string cam_type = "nav_cam"; + double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself + dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used + saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, + FLAGS_reliability_weight_exponent, + median_filter_params, FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, + sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, + FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, FLAGS_external_mesh); - - // With simulated data we won't perform an additional undistortion - // step which would save the undistorted intrinsics. Hence need to - // do that here. - if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); - } - // Save nav cam poses and nav cam images. This does not work for simulated data. - if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { - if (FLAGS_use_brisk_map) { - // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. 
- std::string cam_type = "nav_cam"; - double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself - dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, - FLAGS_external_mesh); - } else { - // Take a shortcut, since the sparse map is made of precisely of the images we - // want to texture, just save those images and their poses from the map, avoiding - // localization - save_navcam_poses_and_images(sparse_map, nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); - } + } else { + // Take a shortcut, since the sparse map is made of precisely of the images we + // want to texture, just save those images and their poses from the map, avoiding + // localization + save_navcam_poses_and_images(sparse_map, nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); } - - return 0; } + + return 0; +} diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 93e770a6..018ed92c 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -18,410 +18,696 @@ # License for the specific language governing permissions and limitations # under the License. 
-''' +""" A wrapper around the tools that when run together produce a textured mesh. -''' +""" + +import argparse +import os +import re +import shutil +import subprocess +import sys + +import cv2 -import sys, os, argparse, subprocess, re, shutil, cv2 def process_args(args): - ''' + """ Set up the parser and parse the args. - ''' - + """ + # Extract some paths before the args are parsed src_path = os.path.dirname(args[0]) - exec_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(src_path)))) - exec_path = os.path.join(exec_path, 'devel/lib/geometry_mapper') - - astrobee_build_dir = os.environ['ASTROBEE_BUILD_PATH'] - - parser = argparse.ArgumentParser(description='Parameters for the geometry mapper.') - parser.add_argument('--ros_bag', dest = 'ros_bag', default = '', - help = 'A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.') - parser.add_argument('--sparse_map', dest = 'sparse_map', default = '', - help = 'A registered sparse map made with some of the ROS bag data.') - parser.add_argument('--output_dir', dest = 'output_dir', default = '', - help = 'The directory where to write the processed data.') - parser.add_argument('--nav_cam_topic', dest = 'nav_cam_topic', default = '/hw/cam_nav', - help = 'The nav cam topic in the bag file.') - parser.add_argument('--haz_cam_points_topic', dest = 'haz_cam_points_topic', - default = '/hw/depth_haz/points', - help = 'The haz cam point cloud topic in the bag file.') - parser.add_argument('--haz_cam_intensity_topic', dest = 'haz_cam_intensity_topic', - default = '/hw/depth_haz/extended/amplitude_int', - help = 'The haz cam intensity topic in the bag file.') - parser.add_argument('--sci_cam_topic', dest = 'sci_cam_topic', default = '/hw/cam_sci', - help = 'The sci cam topic in the bag file.') - parser.add_argument('--camera_type', dest = 'camera_type', default = 'all', - help = 'Specify which cameras to process. By default, process all. 
' - + 'Options: nav_cam, sci_cam.') - parser.add_argument('--start', dest = 'start', default = '0.0', - help = 'How many seconds into the bag to start processing the data.') - parser.add_argument('--duration', dest = 'duration', default = '-1.0', - help = 'For how many seconds to do the processing.') - parser.add_argument('--sampling_spacing_seconds', dest = 'sampling_spacing_seconds', - default = '2', - help = 'Spacing to use, in seconds, between consecutive depth images in ' - 'the bag that are processed.') - parser.add_argument('--dist_between_processed_cams', dest = 'dist_between_processed_cams', - default = '0.1', - help = 'Once an image or depth image is processed, how far the camera ' - + 'should move (in meters) before it should process more data.') - parser.add_argument('--sci_cam_timestamps', dest = 'sci_cam_timestamps', - default = '', - help = 'Process only these sci cam timestamps (rather than ' + - 'any in the bag using --dist_between_processed_cams, etc.). ' + - 'Must be a file with one timestamp per line.') - parser.add_argument('--depth_exclude_columns', dest = 'depth_exclude_columns', default = '0', - help = 'Remove this many columns of data from the rectangular ' + - 'depth image sensor at margins to avoid distortion.') - parser.add_argument('--depth_exclude_rows', dest = 'depth_exclude_rows', default = '0', - help = 'Remove this many rows of data from the rectangular ' + - 'depth image sensor at margins to avoid distortion.') - parser.add_argument('--foreshortening_delta', dest = 'foreshortening_delta', default = '5.0', - help = 'A smaller value here will result in holes in depth images ' + - 'being filled more aggressively but potentially with more artifacts ' + - 'in foreshortened regions.') - parser.add_argument('--median_filters', dest = 'median_filters', default = '7 0.1 25 0.1', - help = 'Given a list "w1 d1 w2 d2 ... 
", remove a depth image point ' + - 'if it differs, in the Manhattan norm, from the median of depth points ' + - 'in the pixel window of size wi centered at it by more than di. This ' + - 'removes points sticking out for each such i.') - parser.add_argument('--depth_hole_fill_diameter', dest = 'depth_hole_fill_diameter', default = '30', - help = 'Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).') - parser.add_argument('--max_ray_length', dest = 'max_ray_length', default = '2.0', - help = 'Process haz cam depth image points no further than ' - + 'this distance from the camera.') - parser.add_argument('--voxel_size', dest = 'voxel_size', default = '0.01', - help = 'When fusing the depth point clouds use a voxel of this size, ' - + 'in meters.') - parser.add_argument('--max_iso_times_exposure', dest = 'max_iso_times_exposure', - default = '5.1', - help = 'Apply the inverse gamma transform to images, multiply them by ' - + 'max_iso_times_exposure/ISO/exposure_time to adjust for ' - + 'lightning differences, then apply the gamma transform back. ' - + 'This value should be set to the maximum observed ' - + 'ISO * exposure_time. The default is 5.1. 
Not used with simulated data.') - parser.add_argument('--smoothing_time', dest = 'smoothing_time', - default = '0.00005', - help = 'A larger value will result in a smoother mesh.') - parser.add_argument('--max_num_hole_edges', dest = 'max_num_hole_edges', - default = '1000', - help = 'Close holes in the mesh which have no more than this many edges.') - parser.add_argument('--max_hole_diameter', dest = 'max_hole_diameter', - default = '0.3', - help = 'The diameter (in meters) of the largest hole in the mesh to fill.') - parser.add_argument('--num_min_faces_in_component', dest = 'num_min_faces_in_component', - default = '100', - help = 'Keep only connected mesh components with at least this many faces.') - parser.add_argument('--num_components_to_keep', dest = 'num_components_to_keep', - default = '10', - help = 'How many of the largest connected components ' - + 'of the mesh to keep. Being too aggressive here can result in a mesh ' - + 'with missing parts.') - parser.add_argument('--edge_keep_ratio', dest = 'edge_keep_ratio', - default = '0.2', - help = 'Simply the mesh keeping only this fraction of the original edges.') - parser.add_argument('--merge_maps', dest = 'merge_maps', - default = '', - help = 'Given several output geometry mapper directories, specified ' + \ - 'as a list in quotes, create a merged textured mesh. The input ' + \ - 'bag and sparse map will not be used. Each input geometry mapper run ' + \ - 'can have its own bag and sparse map. The sparse maps must be ' + \ - 'registered to a global coordinate system and co-registered to ' + \ - 'each other, such as when extracted from a larger merged and ' + \ - 'registered map.') - parser.add_argument('--start_step', dest = 'start_step', default = '0', - help = 'Start processing at this step. Useful for resuming work. 
' + \ - 'See the doc for options.') - parser.add_argument('--astrobee_build_dir', dest = 'astrobee_build_dir', - default = astrobee_build_dir, - help = 'The path to the Astrobee build directory.') - parser.add_argument('--localization_options', dest = 'localization_options', - default = '--min_surf_features 400 --max_surf_features 600 --min_surf_threshold 5 --default_surf_threshold 10 --max_surf_threshold 1000 --early_break_landmarks 400 --verbose_localization', - help = 'Options to to use to localize the nav cam images.') - - parser.add_argument('--use_brisk_map', dest = 'use_brisk_map', action = 'store_true', - help = 'Instead of a SURF sparse map made from the same bag that ' + \ - ' needs texturing, use a pre-existing and unrelated BRISK map. ' + - 'This map may be more convenient but less reliable.') - parser.add_argument('--simulated_data', dest = 'simulated_data', action = 'store_true', - help = 'If specified, use data recorded in simulation. ' + \ - 'Then haz and sci camera poses and intrinsics should be recorded ' + \ - 'in the bag file.') - parser.add_argument('--external_mesh', dest = 'external_mesh', default = '', - help = 'Use this mesh to texture the images, rather than creating one ' + \ - 'from depth data in the current bag.') - parser.add_argument('--scicam_to_hazcam_timestamp_offset_override_value', - dest = 'scicam_to_hazcam_timestamp_offset_override_value', - default = '', - help = 'Override the value of scicam_to_hazcam_timestamp_offset ' + \ - 'from the robot config file with this value.') - parser.add_argument('--verbose', dest = 'verbose', action = 'store_true', - help = 'Echo all output in the terminal.') - parser.add_argument('--save_debug_data', dest = 'save_debug_data', action = 'store_true', - help = 'Save many intermediate datasets for debugging.') + exec_path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.dirname(src_path))) + ) + exec_path = os.path.join(exec_path, "devel/lib/geometry_mapper") + + astrobee_build_dir = 
os.environ["ASTROBEE_BUILD_PATH"] + + parser = argparse.ArgumentParser(description="Parameters for the geometry mapper.") + parser.add_argument( + "--ros_bag", + dest="ros_bag", + default="", + help="A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.", + ) + parser.add_argument( + "--sparse_map", + dest="sparse_map", + default="", + help="A registered sparse map made with some of the ROS bag data.", + ) + parser.add_argument( + "--output_dir", + dest="output_dir", + default="", + help="The directory where to write the processed data.", + ) + parser.add_argument( + "--nav_cam_topic", + dest="nav_cam_topic", + default="/hw/cam_nav", + help="The nav cam topic in the bag file.", + ) + parser.add_argument( + "--haz_cam_points_topic", + dest="haz_cam_points_topic", + default="/hw/depth_haz/points", + help="The haz cam point cloud topic in the bag file.", + ) + parser.add_argument( + "--haz_cam_intensity_topic", + dest="haz_cam_intensity_topic", + default="/hw/depth_haz/extended/amplitude_int", + help="The haz cam intensity topic in the bag file.", + ) + parser.add_argument( + "--sci_cam_topic", + dest="sci_cam_topic", + default="/hw/cam_sci", + help="The sci cam topic in the bag file.", + ) + parser.add_argument( + "--camera_type", + dest="camera_type", + default="all", + help="Specify which cameras to process. By default, process all. 
" + + "Options: nav_cam, haz_cam, sci_cam.", + ) + parser.add_argument( + "--start", + dest="start", + default="0.0", + help="How many seconds into the bag to start processing the data.", + ) + parser.add_argument( + "--duration", + dest="duration", + default="-1.0", + help="For how many seconds to do the processing.", + ) + parser.add_argument( + "--sampling_spacing_seconds", + dest="sampling_spacing_seconds", + default="2", + help="Spacing to use, in seconds, between consecutive depth images in " + "the bag that are processed.", + ) + parser.add_argument( + "--dist_between_processed_cams", + dest="dist_between_processed_cams", + default="0.1", + help="Once an image or depth image is processed, how far the camera " + + "should move (in meters) before it should process more data.", + ) + parser.add_argument( + "--sci_cam_timestamps", + dest="sci_cam_timestamps", + default="", + help="Process only these sci cam timestamps (rather than " + + "any in the bag using --dist_between_processed_cams, etc.). " + + "Must be a file with one timestamp per line.", + ) + parser.add_argument( + "--depth_exclude_columns", + dest="depth_exclude_columns", + default="0", + help="Remove this many columns of data from the rectangular " + + "depth image sensor at margins to avoid distortion.", + ) + parser.add_argument( + "--depth_exclude_rows", + dest="depth_exclude_rows", + default="0", + help="Remove this many rows of data from the rectangular " + + "depth image sensor at margins to avoid distortion.", + ) + parser.add_argument( + "--foreshortening_delta", + dest="foreshortening_delta", + default="5.0", + help="A smaller value here will result in holes in depth images " + + "being filled more aggressively but potentially with more artifacts " + + "in foreshortened regions.", + ) + parser.add_argument( + "--median_filters", + dest="median_filters", + default="7 0.1 25 0.1", + help='Given a list "w1 d1 w2 d2 ... 
", remove a depth image point ' + + "if it differs, in the Manhattan norm, from the median of depth points " + + "in the pixel window of size wi centered at it by more than di. This " + + "removes points sticking out for each such i.", + ) + parser.add_argument( + "--depth_hole_fill_diameter", + dest="depth_hole_fill_diameter", + default="30", + help="Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).", + ) + parser.add_argument( + "--reliability_weight_exponent", + dest="reliability_weight_exponent", + default="2", + help="A larger value will give more weight to depth points corresponding to pixels closer to depth image center, which are considered more reliable.", + ) + parser.add_argument( + "--max_ray_length", + dest="max_ray_length", + default="2.0", + help="Process haz cam depth image points no further than " + + "this distance from the camera.", + ) + parser.add_argument( + "--voxel_size", + dest="voxel_size", + default="0.01", + help="When fusing the depth point clouds use a voxel of this size, " + + "in meters.", + ) + parser.add_argument( + "--voxblox_integrator", + dest="voxblox_integrator", + default="merged", + help="When fusing the depth point clouds use this VoxBlox method. " + + 'Options are: "merged", "simple", and "fast".', + ) + parser.add_argument( + "--max_iso_times_exposure", + dest="max_iso_times_exposure", + default="5.1", + help="Apply the inverse gamma transform to images, multiply them by " + + "max_iso_times_exposure/ISO/exposure_time to adjust for " + + "lightning differences, then apply the gamma transform back. " + + "This value should be set to the maximum observed " + + "ISO * exposure_time. The default is 5.1. 
Not used with simulated data.", + ) + parser.add_argument( + "--smoothing_time", + dest="smoothing_time", + default="0.00005", + help="A larger value will result in a smoother mesh.", + ) + parser.add_argument( + "--max_num_hole_edges", + dest="max_num_hole_edges", + default="1000", + help="Close holes in the mesh which have no more than this many edges.", + ) + parser.add_argument( + "--max_hole_diameter", + dest="max_hole_diameter", + default="0.3", + help="The diameter (in meters) of the largest hole in the mesh to fill.", + ) + parser.add_argument( + "--num_min_faces_in_component", + dest="num_min_faces_in_component", + default="100", + help="Keep only connected mesh components with at least this many faces.", + ) + parser.add_argument( + "--num_components_to_keep", + dest="num_components_to_keep", + default="10", + help="How many of the largest connected components " + + "of the mesh to keep. Being too aggressive here can result in a mesh " + + "with missing parts.", + ) + parser.add_argument( + "--edge_keep_ratio", + dest="edge_keep_ratio", + default="0.2", + help="Simply the mesh keeping only this fraction of the original edges.", + ) + parser.add_argument( + "--merge_maps", + dest="merge_maps", + default="", + help="Given several output geometry mapper directories, specified " + + "as a list in quotes, create a merged textured mesh. The input " + + "bag and sparse map will not be used. Each input geometry mapper run " + + "can have its own bag and sparse map. The sparse maps must be " + + "registered to a global coordinate system and co-registered to " + + "each other, such as when extracted from a larger merged and " + + "registered map.", + ) + parser.add_argument( + "--start_step", + dest="start_step", + default="0", + help="Start processing at this step. Useful for resuming work. 
" + + "See the doc for options.", + ) + parser.add_argument( + "--astrobee_build_dir", + dest="astrobee_build_dir", + default=astrobee_build_dir, + help="The path to the Astrobee build directory.", + ) + parser.add_argument( + "--localization_options", + dest="localization_options", + default="--min_surf_features 400 --max_surf_features 600 --min_surf_threshold 5 --default_surf_threshold 10 --max_surf_threshold 1000 --early_break_landmarks 400 --verbose_localization", + help="Options to to use to localize the nav cam images.", + ) + + parser.add_argument( + "--use_brisk_map", + dest="use_brisk_map", + action="store_true", + help="Instead of a SURF sparse map made from the same bag that " + + " needs texturing, use a pre-existing and unrelated BRISK map. " + + "This map may be more convenient but less reliable.", + ) + parser.add_argument( + "--simulated_data", + dest="simulated_data", + action="store_true", + help="If specified, use data recorded in simulation. " + + "Then haz and sci camera poses and intrinsics should be recorded " + + "in the bag file.", + ) + parser.add_argument( + "--external_mesh", + dest="external_mesh", + default="", + help="Use this mesh to texture the images, rather than creating one " + + "from depth data in the current bag.", + ) + parser.add_argument( + "--scicam_to_hazcam_timestamp_offset_override_value", + dest="scicam_to_hazcam_timestamp_offset_override_value", + default="", + help="Override the value of scicam_to_hazcam_timestamp_offset " + + "from the robot config file with this value.", + ) + parser.add_argument( + "--verbose", + dest="verbose", + action="store_true", + help="Echo all output in the terminal.", + ) + parser.add_argument( + "--save_debug_data", + dest="save_debug_data", + action="store_true", + help="Save many intermediate datasets for debugging.", + ) args = parser.parse_args() return (src_path, exec_path, args) + def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): - # Check if the environemnt was set - 
for var in 'ASTROBEE_RESOURCE_DIR', 'ASTROBEE_CONFIG_DIR', 'ASTROBEE_WORLD', 'ASTROBEE_ROBOT': + # Check if the environment was set + for var in [ + "ASTROBEE_RESOURCE_DIR", + "ASTROBEE_CONFIG_DIR", + "ASTROBEE_WORLD", + "ASTROBEE_ROBOT", + ]: if var not in os.environ: raise Exception("Must set " + var) - + if not os.path.exists(geometry_mapper_path): raise Exception("Cannot find the geometry mapper: " + geometry_mapper_path) - + if not os.path.exists(batch_tsdf_path): raise Exception("Cannot find batch_tsdf (a voxblox tool): " + batch_tsdf_path) if not os.path.isdir(args.astrobee_build_dir): - raise Exception("Cannot find the astrobee_build directory. " - + "Specify it via --astrobee_build_dir.") + raise Exception( + "Cannot find the astrobee_build directory. " + + "Specify it via --astrobee_build_dir." + ) + + if args.camera_type == "nav_cam" and args.simulated_data: + print( + "Cannot apply nav cam texture with simulated cameras " + + "as the gazebo distorted images are not accurate enough." + ) + sys.exit(1) - if args.camera_type == 'nav_cam' and args.simulated_data: - print("Cannot apply nav cam texture with simulated cameras " + \ - "as the gazebo distorted images are not accurate enough.") + if args.camera_type == "haz_cam" and args.simulated_data: + print("Texturing haz cam with simulated data was not tested.") sys.exit(1) - + if args.output_dir == "": raise Exception("The path to the output directory was not specified.") + def mkdir_p(path): if path == "": - return # this can happen when path is os.path.dirname("myfile.txt") + return # this can happen when path is os.path.dirname("myfile.txt") try: os.makedirs(path) except OSError: if os.path.isdir(path): pass else: - raise Exception('Could not make directory ' + path + ' as a file with this name exists.') + raise Exception( + "Could not make directory " + path + " as a file with this name exists." 
+ ) + def setup_outputs(args): mkdir_p(args.output_dir) -def run_cmd(cmd, log_file, verbose = False): - ''' + +def run_cmd(cmd, log_file, verbose=False): + """ Run a command and write the output to a file. In verbose mode also print to screen. - ''' + """ print(" ".join(cmd) + "\n") - with open(log_file, 'w', buffering = 0) as f: # replace 'w' with 'wb' for Python 3 + with open(log_file, "w", buffering=0) as f: # replace 'w' with 'wb' for Python 3 f.write(" ".join(cmd) + "\n") process = subprocess.Popen(cmd, stdout=subprocess.PIPE) - for line in iter(process.stdout.readline, ''): # replace '' with b'' for Python 3 + for line in iter( + process.stdout.readline, "" + ): # replace '' with b'' for Python 3 if verbose: sys.stdout.write(line) f.write(line) - # If a certain step failed, do not continue - process.wait() - if (process.returncode != 0): + # If a certain step failed, do not continue + process.wait() + if process.returncode != 0: print("Failed execution of: " + " ".join(cmd)) sys.exit(1) - + + def compute_poses_and_clouds(geometry_mapper_path, args): - ''' + """ Invoke the geometry_mapper tool to compute needed camera poses and clouds. 
- ''' - - cmd = [geometry_mapper_path, - '--ros_bag', args.ros_bag, - '--sparse_map', args.sparse_map, - '--output_dir', args.output_dir, - '--nav_cam_topic', args.nav_cam_topic, - '--haz_cam_points_topic', args.haz_cam_points_topic, - '--haz_cam_intensity_topic', args.haz_cam_intensity_topic, - '--sci_cam_topic', args.sci_cam_topic, - '--camera_type', args.camera_type, - '--start', args.start, - '--duration', args.duration, - '--sampling_spacing_seconds', args.sampling_spacing_seconds, - '--dist_between_processed_cams', args.dist_between_processed_cams, - '--max_iso_times_exposure', args.max_iso_times_exposure, - '--depth_exclude_columns', args.depth_exclude_columns, - '--depth_exclude_rows', args.depth_exclude_rows, - '--foreshortening_delta', args.foreshortening_delta, - '--depth_hole_fill_diameter', args.depth_hole_fill_diameter, - '--median_filters', args.median_filters, - ] + args.localization_options.split(' ') + """ + + cmd = [ + geometry_mapper_path, + "--ros_bag", + args.ros_bag, + "--sparse_map", + args.sparse_map, + "--output_dir", + args.output_dir, + "--nav_cam_topic", + args.nav_cam_topic, + "--haz_cam_points_topic", + args.haz_cam_points_topic, + "--haz_cam_intensity_topic", + args.haz_cam_intensity_topic, + "--sci_cam_topic", + args.sci_cam_topic, + "--camera_type", + args.camera_type, + "--start", + args.start, + "--duration", + args.duration, + "--sampling_spacing_seconds", + args.sampling_spacing_seconds, + "--dist_between_processed_cams", + args.dist_between_processed_cams, + "--max_iso_times_exposure", + args.max_iso_times_exposure, + "--depth_exclude_columns", + args.depth_exclude_columns, + "--depth_exclude_rows", + args.depth_exclude_rows, + "--foreshortening_delta", + args.foreshortening_delta, + "--depth_hole_fill_diameter", + args.depth_hole_fill_diameter, + "--reliability_weight_exponent", + args.reliability_weight_exponent, + "--median_filters", + args.median_filters, + ] + args.localization_options.split(" ") if 
args.sci_cam_timestamps != "": - cmd += ['--sci_cam_timestamps', args.sci_cam_timestamps] - + cmd += ["--sci_cam_timestamps", args.sci_cam_timestamps] + if args.use_brisk_map: - cmd += ['--use_brisk_map'] + cmd += ["--use_brisk_map"] if args.simulated_data: - cmd += ['--simulated_data'] + cmd += ["--simulated_data"] if args.save_debug_data: - cmd += ['--save_debug_data'] + cmd += ["--save_debug_data"] if args.external_mesh != "": - cmd += ['--external_mesh', args.external_mesh] - + cmd += ["--external_mesh", args.external_mesh] + if args.scicam_to_hazcam_timestamp_offset_override_value != "": - cmd += ['--scicam_to_hazcam_timestamp_offset_override_value', - args.scicam_to_hazcam_timestamp_offset_override_value] + cmd += [ + "--scicam_to_hazcam_timestamp_offset_override_value", + args.scicam_to_hazcam_timestamp_offset_override_value, + ] + + log_file = os.path.join(args.output_dir, "geometry_mapper_log.txt") + print( + "Compute camera poses and extract data from the bag. Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + - log_file = os.path.join(args.output_dir, 'geometry_mapper_log.txt') - print('Compute camera poses and extract data from the bag. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) - def fuse_clouds(batch_tsdf_path, mesh, args): - ''' + """ Invoke the voxblox batch_tsdf tool to fuse the depth images. - ''' - - haz_cam_index = os.path.join(args.output_dir, 'haz_cam_index.txt') + """ + + depth_cam_index = os.path.join(args.output_dir, "depth_cam_index.txt") - cmd = [batch_tsdf_path, haz_cam_index, mesh, args.max_ray_length, args.voxel_size] + cmd = [ + batch_tsdf_path, + depth_cam_index, + mesh, + args.max_ray_length, + args.voxel_size, + args.voxblox_integrator, + ] + + log_file = os.path.join(args.output_dir, "voxblox_log.txt") + print( + "Fusing the depth images with voxblox. 
Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'voxblox_log.txt') - print('Fusing the depth images with voxblox. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def smoothe_mesh(input_mesh, output_mesh, args, attempt): - smoothe_mesh_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/smoothe_mesh') + smoothe_mesh_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/smoothe_mesh" + ) if not os.path.exists(smoothe_mesh_path): raise Exception("Cannot find the smoothing tool:" + smoothe_mesh_path) - num_iterations = '1' - smoothe_boundary = '1' - cmd = [smoothe_mesh_path, num_iterations, args.smoothing_time, smoothe_boundary, - input_mesh, output_mesh] + num_iterations = "1" + smoothe_boundary = "1" + cmd = [ + smoothe_mesh_path, + num_iterations, + args.smoothing_time, + smoothe_boundary, + input_mesh, + output_mesh, + ] + + log_file = os.path.join( + args.output_dir, "smooth_mesh_attempt_" + str(attempt) + "_log.txt" + ) + print("Smoothing the mesh. Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'smooth_mesh_attempt_' \ - + str(attempt) + '_log.txt') - print('Smoothing the mesh. 
Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def fill_holes_in_mesh(input_mesh, output_mesh, args, attempt): - fill_holes_path = os.path.join(os.environ['HOME'], 'projects/cgal_tools/fill_holes') + fill_holes_path = os.path.join(os.environ["HOME"], "projects/cgal_tools/fill_holes") if not os.path.exists(fill_holes_path): raise Exception("Cannot find the hole-filling tool:" + fill_holes_path) - cmd = [fill_holes_path, args.max_hole_diameter, - args.max_num_hole_edges, input_mesh, output_mesh] + cmd = [ + fill_holes_path, + args.max_hole_diameter, + args.max_num_hole_edges, + input_mesh, + output_mesh, + ] + + log_file = os.path.join( + args.output_dir, "fill_holes_attempt_" + str(attempt) + "_log.txt" + ) + print("Hole-filling the mesh. Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'fill_holes_attempt_' \ - + str(attempt) + '_log.txt') - print('Hole-filling the mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def rm_connected_components(input_mesh, output_mesh, args): - rm_connected_components_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/rm_connected_components') + rm_connected_components_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/rm_connected_components" + ) if not os.path.exists(rm_connected_components_path): raise Exception("Cannot find the tool:" + rm_connected_components_path) - cmd = [rm_connected_components_path, args.num_components_to_keep, - args.num_min_faces_in_component, input_mesh, output_mesh] + cmd = [ + rm_connected_components_path, + args.num_components_to_keep, + args.num_min_faces_in_component, + input_mesh, + output_mesh, + ] + + log_file = os.path.join(args.output_dir, "rm_connected_components_log.txt") + print( + "Removing small connected components from mesh. 
Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) - log_file = os.path.join(args.output_dir, 'rm_connected_components_log.txt') - print('Removing small connected components from mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) def simplify_mesh(input_mesh, output_mesh, args): - simplify_mesh_path = os.path.join(os.environ['HOME'], - 'projects/cgal_tools/simplify_mesh') + simplify_mesh_path = os.path.join( + os.environ["HOME"], "projects/cgal_tools/simplify_mesh" + ) if not os.path.exists(simplify_mesh_path): raise Exception("Cannot find the tool:" + simplify_mesh_path) cmd = [simplify_mesh_path, args.edge_keep_ratio, input_mesh, output_mesh] - log_file = os.path.join(args.output_dir, 'simplify_mesh_log.txt') - print('Simplifying the mesh. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + log_file = os.path.join(args.output_dir, "simplify_mesh_log.txt") + print("Simplifying the mesh. Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) + + +def undistort_images( + args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale +): -def undistort_images(args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale): - - undistort_image_path = os.path.join(args.astrobee_build_dir, - 'devel/lib/camera/undistort_image') + undistort_image_path = os.path.join( + args.astrobee_build_dir, "devel/lib/camera/undistort_image" + ) if not os.path.exists(undistort_image_path): raise Exception("Cannot find the undistort_image tool: " + undistort_image_path) - - cmd = [undistort_image_path, '--undistorted_crop_win', undist_crop_win, - '--save_bgr', '--robot_camera', cam_type, '-image_list', dist_image_list, - '--scale', str(scale), '--output_directory', undist_dir] - log_file = os.path.join(args.output_dir, 'undist_' + cam_type + '_log.txt') - print('Undistorting ' + cam_type + ' images. 
Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + cmd = [ + undistort_image_path, + "--undistorted_crop_win", + undist_crop_win, + "--save_bgr", + "--robot_camera", + cam_type, + "-image_list", + dist_image_list, + "--scale", + str(scale), + "--output_directory", + undist_dir, + ] + + log_file = os.path.join(args.output_dir, "undist_" + cam_type + "_log.txt") + print( + "Undistorting " + cam_type + " images. Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + def create_texrecon_cameras(args, src_path, undist_dir, cam_type): - cam_to_texrecon_path = os.path.join(src_path, 'cameras_to_texrecon.py') + cam_to_texrecon_path = os.path.join(src_path, "cameras_to_texrecon.py") if not os.path.exists(cam_to_texrecon_path): raise Exception("Cannot find: " + cam_to_texrecon_path) - cmd = ['python', cam_to_texrecon_path, '--camera_dir', args.output_dir, - '--undistorted_image_dir', undist_dir, '--camera_type', cam_type] - - log_file = os.path.join(args.output_dir, 'setup_texrecon_' + cam_type + '_log.txt') - print('Preparing texrecon cameras for ' + cam_type + '. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) + cmd = [ + "python", + cam_to_texrecon_path, + "--camera_dir", + args.output_dir, + "--undistorted_image_dir", + undist_dir, + "--camera_type", + cam_type, + ] + + log_file = os.path.join(args.output_dir, "setup_texrecon_" + cam_type + "_log.txt") + print( + "Preparing texrecon cameras for " + + cam_type + + ". 
Writing the output log to: " + + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + def run_texrecon(args, src_path, mesh, undist_dir, cam_type): # That is one long path texrecon_path = os.path.dirname(os.path.dirname(os.path.dirname(exec_path))) - texrecon_path = os.path.join(texrecon_path, - 'build/geometry_mapper/texrecon/src/texrecon-build', - 'apps/texrecon/texrecon') + texrecon_path = os.path.join( + texrecon_path, + "build/geometry_mapper/texrecon/src/texrecon-build", + "apps/texrecon/texrecon", + ) if not os.path.exists(texrecon_path): raise Exception("Cannot find: " + texrecon_path) - - texrecon_dir=os.path.join(args.output_dir, cam_type + '_texture/run') + + texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/run") parent_dir = os.path.dirname(texrecon_dir) if os.path.isdir(parent_dir): # Wipe the existing directory print("Removing recursively old directory: " + parent_dir) shutil.rmtree(parent_dir) mkdir_p(texrecon_dir) - - cmd = [texrecon_path, undist_dir, mesh, texrecon_dir, - '-o', 'gauss_clamping', '-d', 'view_dir_dot_face_dir', - '--keep_unseen_faces'] - - log_file = os.path.join(args.output_dir, 'texrecon_' + cam_type + '_log.txt') - print('Running texrecon for ' + cam_type + '. Writing the output log to: ' + log_file) - run_cmd(cmd, log_file, verbose = args.verbose) - - textured_mesh = texrecon_dir + '.obj' + + cmd = [ + texrecon_path, + undist_dir, + mesh, + texrecon_dir, + "-o", + "gauss_clamping", + "-d", + "view_dir_dot_face_dir", + "--keep_unseen_faces", + ] + + log_file = os.path.join(args.output_dir, "texrecon_" + cam_type + "_log.txt") + print( + "Running texrecon for " + cam_type + ". Writing the output log to: " + log_file + ) + run_cmd(cmd, log_file, verbose=args.verbose) + + textured_mesh = texrecon_dir + ".obj" return textured_mesh + # Find the ratio of a sci cam image's width and the width from the camera config file. 
def find_sci_cam_scale(image_file): # width from the imag file img = cv2.imread(image_file) image_width = img.shape[1] - + # Width from the config file - config_file = os.environ['ASTROBEE_CONFIG_DIR'] + '/cameras.config' - - with open(config_file, 'r') as file: + config_file = os.environ["ASTROBEE_CONFIG_DIR"] + "/cameras.config" + + with open(config_file, "r") as file: text = file.read() - m = re.match('^.*?sci_cam\s*=\s*\{\s*width\s*=\s*(\d+)', text, re.DOTALL) + m = re.match(r"^.*?sci_cam\s*=\s*\{\s*width\s*=\s*(\d+)", text, re.DOTALL) if not m: print("Could not parse sci cam width from: " + config_file) sys.exit(1) config_width = m.group(1) - - return float(image_width)/float(config_width) + + return float(image_width) / float(config_width) + def texture_mesh(src_path, cam_type, mesh, args): - dist_image_list = os.path.join(args.output_dir, cam_type + '_index.txt') + dist_image_list = os.path.join(args.output_dir, cam_type + "_index.txt") with open(dist_image_list) as f: image_files = f.readlines() if len(image_files) == 0: @@ -430,26 +716,32 @@ def texture_mesh(src_path, cam_type, mesh, args): print("Found no images for: " + cam_type) return "" 
crop_ht = 2*int(round(500.0 * scale)) # an even number - undist_crop_win = str(crop_wd) + ' ' + str(crop_ht) - - undistort_images(args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale) + crop_wd = 2 * int(round(625.0 * scale)) # an even number + crop_ht = 2 * int(round(500.0 * scale)) # an even number + undist_crop_win = str(crop_wd) + " " + str(crop_ht) + + undistort_images( + args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale + ) create_texrecon_cameras(args, src_path, undist_dir, cam_type) textured_mesh = run_texrecon(args, src_path, mesh, undist_dir, cam_type) else: @@ -459,6 +751,7 @@ def texture_mesh(src_path, cam_type, mesh, args): return textured_mesh + def copy_dir(src, dst, symlinks=False, ignore=None): for item in os.listdir(src): s = os.path.join(src, item) @@ -467,11 +760,12 @@ def copy_dir(src, dst, symlinks=False, ignore=None): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) - + + def merge_poses_and_clouds(args): - ''' + """ Merge individual reconstructions from previous invocations of this tool. 
- ''' + """ input_dirs = args.merge_maps.split() if len(input_dirs) == 0: @@ -487,9 +781,9 @@ def merge_poses_and_clouds(args): items = os.listdir(input_dir) for item in items: - if item.endswith('_log.txt'): + if item.endswith("_log.txt"): continue - + # This copy is awkward because shutil.copytree cannot copy into an existing directory s = os.path.join(input_dir, item) d = os.path.join(output_dir, item) @@ -510,11 +804,11 @@ def merge_poses_and_clouds(args): d2 = os.path.join(d, item2) if not os.path.isdir(s2): shutil.copy2(s2, d2) - + # merge the index files index_files = os.listdir(output_dir) for index_file in index_files: - if not index_file.endswith('index.txt'): + if not index_file.endswith("index.txt"): continue # Merge the index files by concatenating them @@ -528,14 +822,17 @@ def merge_poses_and_clouds(args): for line in f_in.readlines(): line = line.replace(input_dir, output_dir) f_out.write(line) - -if __name__ == '__main__': - + + +if __name__ == "__main__": + (src_path, exec_path, args) = process_args(sys.argv) - geometry_mapper_path = os.path.join(exec_path, 'geometry_mapper') - batch_tsdf_path = os.path.join(os.environ['HOME'], 'catkin_ws/devel/lib/voxblox_ros/batch_tsdf') - + geometry_mapper_path = os.path.join(exec_path, "geometry_mapper") + batch_tsdf_path = os.path.join( + os.environ["HOME"], "catkin_ws/devel/lib/voxblox_ros/batch_tsdf" + ) + sanity_checks(geometry_mapper_path, batch_tsdf_path, args) setup_outputs(args) @@ -548,67 +845,75 @@ def merge_poses_and_clouds(args): else: merge_poses_and_clouds(args) - fused_mesh = os.path.join(args.output_dir, 'fused_mesh.ply') + fused_mesh = os.path.join(args.output_dir, "fused_mesh.ply") if start_step <= 1 and args.external_mesh == "": fuse_clouds(batch_tsdf_path, fused_mesh, args) - + # Smothing must happen before hole-filling, to remove weird # artifacts - smooth_mesh = os.path.join(args.output_dir, 'smooth_mesh.ply') + smooth_mesh = os.path.join(args.output_dir, "smooth_mesh.ply") if start_step 
<= 2 and args.external_mesh == "": attempt = 1 smoothe_mesh(fused_mesh, smooth_mesh, args, attempt) # Fill holes - hole_filled_mesh = os.path.join(args.output_dir, 'hole_filled_mesh.ply') + hole_filled_mesh = os.path.join(args.output_dir, "hole_filled_mesh.ply") if start_step <= 3 and args.external_mesh == "": attempt = 1 fill_holes_in_mesh(smooth_mesh, hole_filled_mesh, args, attempt) # Rm small connected components - clean_mesh = os.path.join(args.output_dir, 'clean_mesh.ply') + clean_mesh = os.path.join(args.output_dir, "clean_mesh.ply") if start_step <= 4 and args.external_mesh == "": rm_connected_components(hole_filled_mesh, clean_mesh, args) - # Smoothe again - smooth_mesh2 = os.path.join(args.output_dir, 'smooth_mesh2.ply') + # Smoothe again + smooth_mesh2 = os.path.join(args.output_dir, "smooth_mesh2.ply") if start_step <= 5 and args.external_mesh == "": attempt = 2 smoothe_mesh(clean_mesh, smooth_mesh2, args, attempt) - + # Fill holes again. That is necessary, and should happen after smoothing. # Mesh cleaning creates small holes and they can't be cleaned well # without some more smoothing like above. 
- hole_filled_mesh2 = os.path.join(args.output_dir, 'hole_filled_mesh2.ply') + hole_filled_mesh2 = os.path.join(args.output_dir, "hole_filled_mesh2.ply") if start_step <= 6 and args.external_mesh == "": attempt = 2 fill_holes_in_mesh(smooth_mesh2, hole_filled_mesh2, args, attempt) # Smoothe again as filling holes can make the mesh a little rough - smooth_mesh3 = os.path.join(args.output_dir, 'smooth_mesh3.ply') + smooth_mesh3 = os.path.join(args.output_dir, "smooth_mesh3.ply") if start_step <= 7 and args.external_mesh == "": attempt = 3 smoothe_mesh(hole_filled_mesh2, smooth_mesh3, args, attempt) # Simplify the mesh - simplified_mesh = os.path.join(args.output_dir, 'simplified_mesh.ply') + simplified_mesh = os.path.join(args.output_dir, "simplified_mesh.ply") if start_step <= 8 and args.external_mesh == "": simplify_mesh(smooth_mesh3, simplified_mesh, args) if args.external_mesh != "": # So that can texture on top of it simplified_mesh = args.external_mesh - - nav_textured_mesh = '' - sci_textured_mesh = '' + + nav_textured_mesh = "" + haz_textured_mesh = "" + sci_textured_mesh = "" if start_step <= 9: # For simulated data texturing nav cam images is not supported - if (not args.simulated_data) and (args.camera_type == 'all' or \ - args.camera_type == 'nav_cam'): - nav_textured_mesh = texture_mesh(src_path, 'nav_cam', simplified_mesh, args) + if (not args.simulated_data) and ( + args.camera_type == "all" or args.camera_type == "nav_cam" + ): + nav_textured_mesh = texture_mesh(src_path, "nav_cam", simplified_mesh, args) - if args.camera_type == 'all' or args.camera_type == 'sci_cam': - sci_textured_mesh = texture_mesh(src_path, 'sci_cam', simplified_mesh, args) + # For simulated data texturing haz cam images is not supported + if (not args.simulated_data) and ( + args.camera_type == "all" or args.camera_type == "haz_cam" + ): + haz_textured_mesh = texture_mesh(src_path, "haz_cam", simplified_mesh, args) + + if args.camera_type == "all" or args.camera_type == 
"sci_cam": + sci_textured_mesh = texture_mesh(src_path, "sci_cam", simplified_mesh, args) if args.external_mesh == "": print("Fused mesh: " + fused_mesh) @@ -621,9 +926,12 @@ def merge_poses_and_clouds(args): print("Simplified mesh: " + simplified_mesh) else: print("External mesh: " + args.external_mesh) - + if nav_textured_mesh != "": print("Nav cam textured mesh: " + nav_textured_mesh) + if haz_textured_mesh != "": + print("Haz cam textured mesh: " + haz_textured_mesh) + if sci_textured_mesh != "": print("Sci cam textured mesh: " + sci_textured_mesh) diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index d430c79b..d8e14135 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ b/dense_map/geometry_mapper/tools/image_picker.cc @@ -265,9 +265,9 @@ int main(int argc, char** argv) { // Write the images to disk bool save_grayscale = true; // For feature matching need grayscale - double found_time = -1.0; // won't be used, but expected by the api - int bag_pos = 0; // reset this each time for (size_t nav_it = 0; nav_it < nav_cam_timestamps.size(); nav_it++) { + double found_time = -1.0; // won't be used, but expected by the api + int bag_pos = 0; // reset this each time cv::Mat image; if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, save_grayscale, image, bag_pos, diff --git a/dense_map/readme.md b/dense_map/readme.md index ff4b89f1..b0600d31 100644 --- a/dense_map/readme.md +++ b/dense_map/readme.md @@ -1,8 +1,6 @@ \page idm Dense Map -## Overview - This cluster of nodes addresses the dense map capabilities, turning the sensor data into maps of the space. 
\subpage geometric_streaming_mapper diff --git a/dense_map/volumetric_mapper/readme.md b/dense_map/volumetric_mapper/readme.md index 7999979f..d682f031 100644 --- a/dense_map/volumetric_mapper/readme.md +++ b/dense_map/volumetric_mapper/readme.md @@ -1,12 +1,15 @@ \page volumetric_mapper Volumetric Mapper +Volumetric Mapper +============== - -## Overview +Overview +--------- The wifi mapper subscribes to all the wifi signal strength messages and prcesses them for 3D visualization. Customization os the produced maps is defined in isaac/config/dense_map/wifi_mapper.config -## Trace Map +Trace Map +--------- The trace map is drawn at a certain resolution, within this resolution, every signal received is averaged. The result is depicted using MarkerArray's where the cube size is the map resolution. @@ -15,7 +18,8 @@ plot_trace - enable or disable the trace map calculation resolution - resolution of the trace map where whithin it, the measurements will be averaged -## Wifi 3D Map +Wifi 3D Map +--------- The Wifi Mapper uses Gaussian Process Regression to mapp the ISS regarding the wifi signal strength. It makes use of the libgp library. When a new measure is obtained, the value is recorded in the GP object. When the timer responsible for calculating the wifi interpolation is enables, all the recorded data is processed. diff --git a/img_analysis/readme.md b/img_analysis/readme.md index f503a758..c9293ae4 100644 --- a/img_analysis/readme.md +++ b/img_analysis/readme.md @@ -1,11 +1,16 @@ \page ano Anomaly Detector -## Overview +Image Anomaly Detector +==================== + +Overview +--------- The Image anomaly detector contains a set of tools to analyse incoming images, using Convolutional Neural Networks, CNN's. To build, train and test the CNN's we use PyTorch. -## TorchLib +TorchLib +--------- This package is needed in the anomaly/img_analysis node, such that we can analyse the image, looking for anomalies. 
The first step is to download the LibTorch ZIP archive, the link might change, best to go to https://pytorch.org/ and select Linux->LibTorch->C++/Java @@ -23,7 +28,8 @@ To link the path, add this to your '$HOME/.bashrc' file: export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH:/path/to/libtorch/share/cmake/Torch -## Define and train the CNN +Define and train the CNN +--------- The python code containing the CNN definition and training is in resources/vent_cnn.py @@ -34,7 +40,8 @@ num_epochs - number of epochs to train, default 30 model_name - saved model name, default "model_cnn.pt" trace_model_name - saved traced model name, default "traced_model_cnn.pt" -### Get training data +Get training data +--------- To get training data, a tool is available which will read the poses from a vents file and others file. The tool will change the robot's pose and take pictures automatically. For the should be activated when the simulation is spawned like so (should be spawned in an undocked position such that the dock simulation does not interfere with the manual set of the pose): @@ -52,7 +59,8 @@ robot_dist - Robot's distance to vent, standard is 1m train_pics_per_vent - Number of pictures taken per vent/other for train data test_pics_per_vent - Number of pictures taken per vent/other for test data -## Test single picture +Test single picture +--------- There is a script, analyse_img.py, in the resources/ folder, which takes as argument the path of a picture taken with the sci_cam, processing it and outputing the classification result. This algorithm is useful to make sure that the C++ API for Pytorch is working properly. diff --git a/img_analysis/resources/analyse_img.py b/img_analysis/resources/analyse_img.py deleted file mode 100644 index 3336ae4c..00000000 --- a/img_analysis/resources/analyse_img.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2021, United States Government, as represented by the -# Administrator of the National Aeronautics and Space Administration. 
-# -# All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt -from PIL import Image - -import torchvision.transforms as transforms - -# Parameters -image = Image.open("./analysed_images/img3.jpg") - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) -image_tensor = test_transforms(image).float() -image_tensor = image_tensor.unsqueeze_(0) - -print(image_tensor[0, :5]) - - -model = models.densenet121(pretrained=True) -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) -model.load_state_dict(torch.load('model_cnn.pt')) -model.eval() - -output = model(image_tensor) -print(output) - -if output is 0: - print("Vent is obstructed") -else if output is 1: - print("Vent is free") -else if output is 2: - print("Could not identify a vent") - -index = output.data.numpy().argmax() -print(index) - -# 0 : -0.3883 -2.1018 -1.6115 -# 1 : -0.3694 -1.6222 -2.1950 -# 2 : -0.029524 -3.546348 -8.242915 -# 3 : -0.9386 -0.5036 -5.4123 \ No newline at end of file diff --git 
a/img_analysis/resources/vent_cnn.py b/img_analysis/resources/vent_cnn.py deleted file mode 100644 index 6de3063a..00000000 --- a/img_analysis/resources/vent_cnn.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) 2021, United States Government, as represented by the -# Administrator of the National Aeronautics and Space Administration. -# -# All rights reserved. -# -# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -# platform" software is licensed under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import matplotlib.pyplot as plt - -import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt - -# Parameters -data_dir = str(Path.home()) + '/datasets/vent_analysis' # specify data path -classes = ['free', 'obstacle', 'unknown'] # specify the image classes -num_epochs = 30 # number of epochs to train -model_name = 'model_cnn.pt' # saved model name -trace_model_name = "traced_model_cnn.pt" # saved traced model name - -# Define transforms for the training data and testing data -train_transforms = transforms.Compose([transforms.Resize([256, 256]), - transforms.RandomRotation(30), - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -# Pass transforms in here, then run the next cell to see how the transforms look -train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) -test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) - -trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) -testloader = torch.utils.data.DataLoader(test_data, batch_size=64) - -# Visualize some of the data----------------------- -# helper function to un-normalize and display an image -def imshow(img): - img = img / 2 + 0.5 # unnormalize - plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image -# obtain one batch of training images -dataiter = iter(trainloader) -images, labels = dataiter.next() -images = images.numpy() # convert images to numpy for display -# plot the images in the batch, along with the corresponding labels -fig = plt.figure(figsize=(25, 4)) 
-# display 20 images -for idx in np.arange(20): - ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) - imshow(images[idx]) - ax.set_title(classes[labels[idx]]) - -# Use GPU if it's available -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -model = models.densenet121(pretrained=True) - -# Freeze parameters so we don't backprop through them -for param in model.parameters(): - param.requires_grad = False - -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) - -criterion = nn.NLLLoss() - -# Only train the classifier parameters, feature parameters are frozen -optimizer = optim.Adam(model.classifier.parameters(), lr=0.003) - -model.to(device); - -epochs = num_epochs -steps = 0 -running_loss = 0 -print_every = 5 -test_loss_min = np.Inf -for epoch in range(epochs): - for inputs, labels in trainloader: - steps += 1 - # Move input and label tensors to the default device - inputs, labels = inputs.to(device), labels.to(device) - - optimizer.zero_grad() - - logps = model.forward(inputs) - loss = criterion(logps, labels) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - test_loss = 0 - accuracy = 0 - model.eval() - with torch.no_grad(): - for inputs, labels in testloader: - inputs, labels = inputs.to(device), labels.to(device) - logps = model.forward(inputs) - batch_loss = criterion(logps, labels) - - test_loss += batch_loss.item() - - # Calculate accuracy - ps = torch.exp(logps) - top_p, top_class = ps.topk(1, dim=1) - equals = top_class == labels.view(*top_class.shape) - accuracy += torch.mean(equals.type(torch.FloatTensor)).item() - - print("Epoch ", epoch + 1, "/", epochs, ".. " - "Train loss: ", running_loss / print_every, ".. " - "Test loss: ", test_loss / len(testloader), ".. 
" - "Test accuracy: ", accuracy / len(testloader), "") - running_loss = 0 - - # save model if validation loss has decreased - if test_loss <= test_loss_min: - torch.save(model.state_dict(), model_name) - test_loss_min = test_loss - - model.to("cpu"); - # An example input you would normally provide to your model's forward() method. - example = torch.rand(1, 3, 224, 224) - # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. - traced_script_module = torch.jit.trace(model, example) - output = traced_script_module(torch.ones(1, 3, 224, 224)) - print(output) - traced_script_module.save(trace_model_name) - model.to("cuda"); - - model.train() diff --git a/img_analysis/scripts/analyse_img.py b/img_analysis/scripts/analyse_img.py index 3336ae4c..ae9a58aa 100644 --- a/img_analysis/scripts/analyse_img.py +++ b/img_analysis/scripts/analyse_img.py @@ -16,25 +16,27 @@ # License for the specific language governing permissions and limitations # under the License. -import torch -from torch import nn -from torch import optim -import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np from pathlib import Path -import matplotlib.pyplot as plt -from PIL import Image +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F import torchvision.transforms as transforms +from PIL import Image +from torch import nn, optim +from torchvision import datasets, models, transforms # Parameters image = Image.open("./analysed_images/img3.jpg") -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) +test_transforms = transforms.Compose( + [ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) image_tensor = test_transforms(image).float() image_tensor = image_tensor.unsqueeze_(0) @@ -42,23 +44,25 @@ model = 
models.densenet121(pretrained=True) -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) -model.load_state_dict(torch.load('model_cnn.pt')) +model.classifier = nn.Sequential( + nn.Linear(1024, 256), + nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(256, 3), + nn.LogSoftmax(dim=1), +) +model.load_state_dict(torch.load("model_cnn.pt")) model.eval() output = model(image_tensor) print(output) if output is 0: - print("Vent is obstructed") -else if output is 1: - print("Vent is free") -else if output is 2: - print("Could not identify a vent") + print("Vent is obstructed") +elif output is 1: + print("Vent is free") +elif output is 2: + print("Could not identify a vent") index = output.data.numpy().argmax() print(index) @@ -66,4 +70,4 @@ # 0 : -0.3883 -2.1018 -1.6115 # 1 : -0.3694 -1.6222 -2.1950 # 2 : -0.029524 -3.546348 -8.242915 -# 3 : -0.9386 -0.5036 -5.4123 \ No newline at end of file +# 3 : -0.9386 -0.5036 -5.4123 diff --git a/img_analysis/scripts/train_cnn_vent.py b/img_analysis/scripts/train_cnn_vent.py index 6de3063a..d04a222e 100644 --- a/img_analysis/scripts/train_cnn_vent.py +++ b/img_analysis/scripts/train_cnn_vent.py @@ -16,41 +16,45 @@ # License for the specific language governing permissions and limitations # under the License. 
-import matplotlib.pyplot as plt +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np import torch -from torch import nn -from torch import optim import torch.nn.functional as F -from torchvision import datasets, transforms, models -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt +from torch import nn, optim +from torchvision import datasets, models, transforms # Parameters -data_dir = str(Path.home()) + '/datasets/vent_analysis' # specify data path -classes = ['free', 'obstacle', 'unknown'] # specify the image classes -num_epochs = 30 # number of epochs to train -model_name = 'model_cnn.pt' # saved model name -trace_model_name = "traced_model_cnn.pt" # saved traced model name +data_dir = str(Path.home()) + "/datasets/vent_analysis" # specify data path +classes = ["free", "obstacle", "unknown"] # specify the image classes +num_epochs = 30 # number of epochs to train +model_name = "model_cnn.pt" # saved model name +trace_model_name = "traced_model_cnn.pt" # saved traced model name # Define transforms for the training data and testing data -train_transforms = transforms.Compose([transforms.Resize([256, 256]), - transforms.RandomRotation(30), - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) - -test_transforms = transforms.Compose([transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225])]) +train_transforms = transforms.Compose( + [ + transforms.Resize([256, 256]), + transforms.RandomRotation(30), + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) + +test_transforms = transforms.Compose( + [ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 
0.225]), + ] +) # Pass transforms in here, then run the next cell to see how the transforms look -train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) -test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) +train_data = datasets.ImageFolder(data_dir + "/train", transform=train_transforms) +test_data = datasets.ImageFolder(data_dir + "/test", transform=test_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) testloader = torch.utils.data.DataLoader(test_data, batch_size=64) @@ -60,15 +64,17 @@ def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image + + # obtain one batch of training images dataiter = iter(trainloader) images, labels = dataiter.next() -images = images.numpy() # convert images to numpy for display +images = images.numpy() # convert images to numpy for display # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) # display 20 images for idx in np.arange(20): - ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) + ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) @@ -81,18 +87,20 @@ def imshow(img): for param in model.parameters(): param.requires_grad = False -model.classifier = nn.Sequential(nn.Linear(1024, 256), - nn.ReLU(), - nn.Dropout(0.2), - nn.Linear(256, 3), - nn.LogSoftmax(dim=1)) +model.classifier = nn.Sequential( + nn.Linear(1024, 256), + nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(256, 3), + nn.LogSoftmax(dim=1), +) criterion = nn.NLLLoss() # Only train the classifier parameters, feature parameters are frozen optimizer = optim.Adam(model.classifier.parameters(), lr=0.003) -model.to(device); +model.to(device) epochs = num_epochs steps = 0 @@ -132,10 +140,19 @@ def imshow(img): equals = top_class == labels.view(*top_class.shape) accuracy += 
torch.mean(equals.type(torch.FloatTensor)).item() - print("Epoch ", epoch + 1, "/", epochs, ".. " - "Train loss: ", running_loss / print_every, ".. " - "Test loss: ", test_loss / len(testloader), ".. " - "Test accuracy: ", accuracy / len(testloader), "") + print( + "Epoch ", + epoch + 1, + "/", + epochs, + ".. " "Train loss: ", + running_loss / print_every, + ".. " "Test loss: ", + test_loss / len(testloader), + ".. " "Test accuracy: ", + accuracy / len(testloader), + "", + ) running_loss = 0 # save model if validation loss has decreased @@ -143,7 +160,7 @@ def imshow(img): torch.save(model.state_dict(), model_name) test_loss_min = test_loss - model.to("cpu"); + model.to("cpu") # An example input you would normally provide to your model's forward() method. example = torch.rand(1, 3, 224, 224) # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. @@ -151,6 +168,6 @@ def imshow(img): output = traced_script_module(torch.ones(1, 3, 224, 224)) print(output) traced_script_module.save(trace_model_name) - model.to("cuda"); + model.to("cuda") model.train() diff --git a/isaac.doxyfile b/isaac.doxyfile index 07bc0350..7634df31 100644 --- a/isaac.doxyfile +++ b/isaac.doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "ISAAC" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.1.0 +PROJECT_NUMBER = 0.2.3 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/isaac/CMakeLists.txt b/isaac/CMakeLists.txt index 4065e6f0..07e40f6e 100644 --- a/isaac/CMakeLists.txt +++ b/isaac/CMakeLists.txt @@ -39,7 +39,7 @@ catkin_package( ############# install(DIRECTORY config/ DESTINATION config) -install(FILES scripts/env_wrapper.sh DESTINATION . ) +install(PROGRAMS scripts/env_wrapper.sh DESTINATION . 
) install(PROGRAMS scripts/apk_print_version.sh scripts/print_version.sh scripts/cpu_print_version.sh diff --git a/isaac/Subsystems.md b/isaac/Subsystems.md index b4f52361..4f327357 100644 --- a/isaac/Subsystems.md +++ b/isaac/Subsystems.md @@ -1,4 +1,4 @@ -# Subsystems +\page subsystems Subsystems The ISAAC Repo contains the following Modules: diff --git a/isaac/config/behaviors/inspection.config b/isaac/config/behaviors/inspection.config index 6069747e..436035e6 100644 --- a/isaac/config/behaviors/inspection.config +++ b/isaac/config/behaviors/inspection.config @@ -112,14 +112,7 @@ parameters = { unit = "boolean", description = "Shift timestamps to start immediately?" },{ - id = "enable_timesync", - reconfigurable = false, - type = "boolean", - default = false, - unit = "boolean", - description = "Shift timestamps by DTU + (UTC % DTU) to enforce sync" - },{ - id = "enable_faceforward_vent", + id = "enable_faceforward_anomaly", reconfigurable = false, type = "boolean", default = true, @@ -132,42 +125,6 @@ parameters = { default = false, unit = "boolean", description = "Should planning be face-forward only?" 
- },{ - id = "desired_vel", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net linear velocity" - },{ - id = "desired_accel", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net linear acceleration" - },{ - id = "desired_omega", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net angular velocity" - },{ - id = "desired_alpha", - reconfigurable = true, - type = "double", - default = -1.0, - min = -1.0, - max = 1.0, - unit = "m/s", - description = "Soft planning limit on net angular acceleration" }, -- INSPECTION CONFIG PARAMETERS { @@ -261,25 +218,7 @@ parameters = { unit = "m", description = "Maximum distance between camera and target" },{ - id = "horizontal_fov", - reconfigurable = true, - type = "double", - default = 2.957216683909311, - min = 1.0, - max = 3.0, - unit = "rad", - description = "Camera horizontal field of view" - },{ - id = "aspect_ratio", - reconfigurable = true, - type = "double", - default = 1.0, - min = 1.0, - max = 3.0, - unit = "rad", - description = "Camera Aspect Ratio" - },{ - id = "vent_size_x", + id = "target_size_x", reconfigurable = true, type = "double", default = 0.5, @@ -288,7 +227,7 @@ parameters = { unit = "rad", description = "Size of vent in the x-axis if z is pointing forward" },{ - id = "vent_size_y", + id = "target_size_y", reconfigurable = true, type = "double", default = 0.5, @@ -305,6 +244,51 @@ parameters = { max = 20, unit = "integer", description = "Number of times the inspection will retry to reach a pose" + },{ + id = "pan_min", + reconfigurable = true, + type = "double", + default = -180, + min = -180, + max = 0, + unit = "degrees", + description = "Minimum robot pan angle" + },{ + id = "pan_max", + reconfigurable = true, + type = 
"double", + default = 180, + min = 0, + max = 180, + unit = "degrees", + description = "Maximum robot pan angle" + },{ + id = "tilt_min", + reconfigurable = true, + type = "double", + default = -90, + min = -90, + max = 0, + unit = "degrees", + description = "Minimum robot tilt angle" + },{ + id = "tilt_max", + reconfigurable = true, + type = "double", + default = 90, + min = 0, + max = 90, + unit = "degrees", + description = "Maximum robot tilt angle" + },{ + id = "overlap", + reconfigurable = true, + type = "double", + default = 0.5, + min = 0.0, + max = 1.0, + unit = "percentage", + description = "Overlap between consecutive images" }} \ No newline at end of file diff --git a/isaac/launch/robot/ILP.launch b/isaac/launch/robot/ILP.launch index ccd325d0..10deca97 100644 --- a/isaac/launch/robot/ILP.launch +++ b/isaac/launch/robot/ILP.launch @@ -97,16 +97,6 @@ - - - - - - - - - - diff --git a/isaac/launch/sim.launch b/isaac/launch/sim.launch index ae1557c4..a9b9b42c 100644 --- a/isaac/launch/sim.launch +++ b/isaac/launch/sim.launch @@ -287,4 +287,7 @@ + + + diff --git a/licenses.csv b/licenses.csv index 6b7bedae..68e84db3 100644 --- a/licenses.csv +++ b/licenses.csv @@ -1,5 +1,6 @@ Type,Name,URL,License,Modified?,Notes Library (external),MVS-Texturing, https://github.com/nmoehrle/mvs-texturing.git,BSD 3-Clause License,YES,for geometric mapper just awaiting pull request to be accepted +Library (external),happly,https://github.com/nmwsharp/happly,MIT license,No,Utility for writing ply files Library (external),libgp,https://github.com/mblum/libgp,BSD-3-Clause License,NO,used for volumetric mapper Library (external),https://github.com/bmegli/wifi-scan,MPL-2.0 License,YES,modified to the point we refactored all their code Library (external),pytorch,https://github.com/pytorch/pytorch,Modified BSD license,NO,used for image classification @@ -38,4 +39,4 @@ ROS package,catkin,https://github.com/catkin/catkin_tools,Apache-2.0 License,NO, ROS 
package,pcl,https://github.com/ros-gbp/pcl-release,BSD license,NO,None Build tool,GCC / G++ / OpenMP,http://www.openmp.org ,GPLv3,NO,None -Build tool,Doxygen,http://www.stack.nl/~dimitri/doxygen,GPLv2,NO,None \ No newline at end of file +Build tool,Doxygen,http://www.stack.nl/~dimitri/doxygen,GPLv2,NO,None diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index 0d2b4661..dd6e8541 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -126,6 +126,8 @@ if [ $vm == 0 ]; then docker build ${astrobee_source}/ \ -f ${astrobee_source}/scripts/docker/astrobee.Dockerfile \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ + --build-arg ROS_VERSION=${ROS_VERSION} \ + --build-arg PYTHON=${PYTHON} \ -t astrobee/astrobee:latest-ubuntu${UBUNTU_VERSION} # Build ISAAC diff --git a/scripts/git/cpplint_repo.py b/scripts/git/cpplint_repo.py index bf43bbe3..f329a256 100755 --- a/scripts/git/cpplint_repo.py +++ b/scripts/git/cpplint_repo.py @@ -24,6 +24,7 @@ num_errors = 0 + def get_cpplint_path(): return os.path.dirname(os.path.realpath(__file__)) + "/cpplint.py" @@ -31,6 +32,7 @@ def get_cpplint_path(): def get_repo_path(): return os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../..") + def run_cpplint(filename, cpplint_path): cpplint = importlib.machinery.SourceFileLoader( "cpplint", cpplint_path ) @@ -47,10 +49,12 @@ def run_cpplint(filename, cpplint_path): cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level) return cpplint.output + def print_objection(): print("Code formatting errors were found.") print("==================================") + def main(): num_errors = 0 @@ -68,7 +72,7 @@ def main(): ): continue for filename in filenames: - if "TinyEXIF" in filename: + if "TinyEXIF" in filename or "happly" in filename: continue if not filename.endswith( ( diff --git a/scripts/git/pre-commit.linter_cpp b/scripts/git/pre-commit.linter_cpp index 7eb616d4..cd15d64f 100755 --- a/scripts/git/pre-commit.linter_cpp +++ 
b/scripts/git/pre-commit.linter_cpp @@ -102,6 +102,7 @@ def main(): and not "debian" in filename and not "build" in filename and not "TinyEXIF" in filename + and not "happly" in filename ): cpp_files_to_lint.append(filename) diff --git a/scripts/git/pre-commit.linter_python b/scripts/git/pre-commit.linter_python index 06e4069e..9fbb6774 100755 --- a/scripts/git/pre-commit.linter_python +++ b/scripts/git/pre-commit.linter_python @@ -29,36 +29,38 @@ fi failed_lint=false -if ! command -v black &> /dev/null -then - echo "Black was not found, to install check https://github.com/psf/black" - echo "Unfortunately black requires Python 3.6.2+ to run" +command -v black > /dev/null 2>&1 +ans=$? +if [ "$ans" -ne "0" ]; then + echo "'black' was not found. To install, check https://github.com/psf/black." + echo "Note that black requires Python 3.6.2+ to run." exit 0 fi -if ! command -v isort &> /dev/null -then - echo "isort was not found, to install check https://github.com/PyCQA/isort" +command -v isort > /dev/null 2>&1 +ans=$? +if [ "$ans" -ne "0" ]; then + echo "'isort' was not found. To install, check https://github.com/PyCQA/isort." exit 0 fi echo "==================================================" -echo " Analysing with black" +echo " Analysing python code style with 'black'." # This check the files but they will not be commited if `black . --include ${files} --check --quiet`; then - echo "Linter black checks passed" + echo "Linter checks using 'black' passed." else - echo "Errors detected, fixing ..." - black . --include ${files} - failed_lint=true + echo "Errors detected with 'black'. Fixing them. Try to add and commit your files again." + black . --include ${files} + failed_lint=true fi echo "==================================================" -echo " Analysing with isort" +echo " Analysing python code style with 'isort'." 
if $(isort ${files} --extend-skip cmake --profile black --diff --check-only --quiet >/dev/null); then - echo "Linter isort checks passed" + echo "Linter checks using 'isort' passed." else - echo "Errors detected, fixing ..." + echo "Errors detected with 'isort'. Fixing them. Try to add and commit your files again." isort ${files} --extend-skip cmake --profile black >/dev/null failed_lint=true fi diff --git a/scripts/setup/build_install_dependencies.sh b/scripts/setup/build_install_dependencies.sh index 4fd0b0e6..22a40ff5 100755 --- a/scripts/setup/build_install_dependencies.sh +++ b/scripts/setup/build_install_dependencies.sh @@ -23,7 +23,11 @@ DEP_LOC=$(dirname "$(readlink -f "$0")")/dependencies -#sudo apt-get install -y +sudo apt-get install -y libgtest-dev +cd /usr/src/gtest +sudo cmake CMakeLists.txt +sudo make +sudo cp *.a /usr/lib cd ${DEP_LOC} ./build_install_gp.sh || exit 1 diff --git a/scripts/update_release.sh b/scripts/update_release.sh new file mode 100755 index 00000000..2baf4bb3 --- /dev/null +++ b/scripts/update_release.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ $# -ne 1 ] + then + echo "Usage: $0 VERSION" + exit 1 +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +cd $DIR/.. + +dch -c debian/changelog -v $1 + +sed -i -e "s/\# ISAAC Robot Software v1/\# ISAAC Robot Software v1\n\n\#\# Release $1\n\nINSERT DESCRIPTION HERE/g" RELEASE.md + +sed -i -e "s/^PROJECT_NUMBER.*/PROJECT_NUMBER = $1/g" isaac.doxyfile + +$EDITOR RELEASE.md