From 5c69c6cf286e9e10983a808e2a83c8b30ab8aeba Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Fri, 13 Dec 2024 17:13:38 -0800 Subject: [PATCH] Add pre-commit hooks & BSD 3-Clause license (#206) * Add pre-commit hooks * Fix unknown module issue from msgs * Hardcode pylint version for consistency * Add ros2-devel to workflow branches * Remove pylint from workflow due to inconsistent errors between remote and local * Add license * Fix README numbering * Fix LICENSE numbering * Add license header to yaml files as well --- .codespell-ignore | 2 + .github/LICENSE_HEADER.md | 2 + .github/PULL_REQUEST_TEMPLATE.md | 12 +- .github/workflows/pre-commit.yaml | 44 +++++++ .pre-commit-config.yaml | 69 +++++++++++ .pylintrc | 6 +- LICENSE.md | 28 +++++ README.md | 110 ++++++++++------- ada_feeding/README.md | 74 ++++++------ ada_feeding/ada_feeding/__init__.py | 3 + ada_feeding/ada_feeding/action_server_bt.py | 5 +- ada_feeding/ada_feeding/behaviors/__init__.py | 3 + .../behaviors/acquisition/__init__.py | 3 + .../acquisition/compute_action_constraints.py | 4 +- .../acquisition/compute_food_frame.py | 8 +- .../behaviors/blackboard_behavior.py | 4 +- .../ada_feeding/behaviors/moveit2/__init__.py | 3 + .../moveit2/modify_collision_object.py | 4 +- .../behaviors/moveit2/moveit2_constraints.py | 4 +- .../behaviors/moveit2/moveit2_execute.py | 4 +- .../behaviors/moveit2/moveit2_plan.py | 7 +- .../behaviors/moveit2/servo_move.py | 4 +- .../moveit2/toggle_collision_object.py | 4 +- .../ada_feeding/behaviors/ros/__init__.py | 3 + ada_feeding/ada_feeding/behaviors/ros/msgs.py | 4 +- ada_feeding/ada_feeding/behaviors/ros/tf.py | 4 +- ada_feeding/ada_feeding/behaviors/ros/time.py | 4 +- .../behaviors/transfer/__init__.py | 3 + .../behaviors/transfer/compute_mouth_frame.py | 4 +- .../ada_feeding/decorators/__init__.py | 3 + .../ada_feeding/decorators/on_preempt.py | 3 + .../decorators/timeout_from_blackboard.py | 5 +- ada_feeding/ada_feeding/helpers.py | 3 + ada_feeding/ada_feeding/idioms/__init__.py | 3 + .../ada_feeding/idioms/bite_transfer.py | 4 +- .../ada_feeding/idioms/eventually_swiss.py | 8 +- .../ada_feeding/idioms/ft_thresh_utils.py | 8 +- .../ada_feeding/idioms/pre_moveto_config.py | 8 +- .../idioms/retry_call_ros_service.py | 6 +- .../ada_feeding/idioms/scoped_behavior.py | 5 +- ada_feeding/ada_feeding/idioms/servo_until.py | 11 +- .../ada_feeding/idioms/wait_for_secs.py | 4 +- ada_feeding/ada_feeding/trees/__init__.py | 5 +- .../ada_feeding/trees/acquire_food_tree.py | 14 ++- .../ada_feeding/trees/activate_controller.py | 6 +- .../ada_feeding/trees/move_from_mouth_tree.py | 7 +- ...o_configuration_with_ft_thresholds_tree.py | 4 +- ...configuration_with_wheelchair_wall_tree.py | 4 +- .../ada_feeding/trees/move_to_mouth_tree.py | 7 +- .../ada_feeding/trees/move_to_pose_tree.py | 4 +- ada_feeding/ada_feeding/trees/move_to_tree.py | 6 +- .../ada_feeding/trees/start_servo_tree.py | 4 +- .../ada_feeding/trees/stop_servo_tree.py | 4 +- ada_feeding/ada_feeding/trees/trigger_tree.py | 6 +- ada_feeding/ada_feeding/visitors/__init__.py | 3 + .../ada_feeding/visitors/debug_visitor.py | 4 +- .../ada_feeding/visitors/moveto_visitor.py | 4 +- ada_feeding/ada_feeding/watchdog/__init__.py | 3 + .../ada_feeding/watchdog/estop_condition.py | 12 +- .../watchdog/ft_sensor_condition.py | 6 +- .../watchdog/watchdog_condition.py | 6 +- .../ada_feeding_action_servers_custom.yaml | 3 + .../ada_feeding_action_servers_default.yaml | 3 + ada_feeding/config/ada_watchdog.yaml | 11 +- ada_feeding/launch/ada_feeding_launch.xml | 5 + 
ada_feeding/scripts/ada_watchdog.py | 3 + ada_feeding/scripts/create_action_servers.py | 7 +- ada_feeding/scripts/dummy_ft_sensor.py | 5 +- ada_feeding/scripts/joint_state_latency.py | 3 + ada_feeding/scripts/robot_state_service.py | 3 + ada_feeding/scripts/save_image.py | 3 + ada_feeding/tests/__init__.py | 2 + ada_feeding/tests/helpers.py | 6 +- ada_feeding/tests/test_eventually_swiss.py | 3 + ada_feeding/tests/test_scoped_behavior.py | 4 + .../ada_feeding_action_select/__init__.py | 2 + .../adapters/__init__.py | 3 + .../adapters/base_adapters.py | 4 +- .../adapters/color_adapter.py | 6 +- .../adapters/hapticnet_adapter.py | 4 +- .../adapters/models/__init__.py | 3 + .../adapters/models/hapticnet.py | 4 +- .../adapters/models/spanet.py | 4 +- .../adapters/spanet_adapter.py | 6 +- .../ada_feeding_action_select/helpers.py | 4 +- .../policies/__init__.py | 3 + .../policies/base_policies.py | 6 +- .../policies/color_policy.py | 4 +- .../policies/linear_policies.py | 6 +- .../policy_service.py | 9 +- .../config/acquisition_library.yaml | 5 +- .../config/acquisition_library.yaml.orig | 2 +- .../config/color_map.yaml | 3 + .../config/full_acquisition_library.yaml | 5 +- .../config/full_acquisition_library.yaml.orig | 2 +- .../config/policies.yaml | 21 ++-- .../ada_feeding_action_select_launch.xml | 5 + .../scripts/modify_ac_library.py | 5 +- ada_feeding_action_select/setup.py | 3 + .../test/test_copyright.py | 3 + ada_feeding_action_select/test/test_pep257.py | 3 + .../action/ActivateController.action | 2 +- ada_feeding_msgs/msg/Mask.msg | 4 +- .../srv/ModifyCollisionObject.srv | 2 +- ada_feeding_perception/README.md | 76 ++++++------ .../ada_feeding_perception/__init__.py | 2 + .../ada_feeding_perception_node.py | 10 +- .../depth_post_processors.py | 4 +- .../ada_feeding_perception/face_detection.py | 9 +- .../food_on_fork_detection.py | 6 + .../food_on_fork_detectors.py | 23 ++-- .../food_on_fork_train_test.py | 3 + .../ada_feeding_perception/helpers.py | 8 +- .../ada_feeding_perception/republisher.py | 3 + .../segment_from_point.py | 10 +- .../ada_feeding_perception/table_detection.py | 8 +- .../test_efficient_sam.py | 3 + .../ada_feeding_perception/test_sam.py | 3 + .../test_segment_from_point.py | 3 + .../config/face_detection.yaml | 5 +- .../config/food_on_fork_detection.yaml | 7 +- .../config/republisher.yaml | 3 + .../config/segment_from_point.yaml | 3 + .../config/table_detection.yaml | 7 +- .../config/test_segment_from_point.yaml | 11 +- .../launch/ada_feeding_perception.launch.py | 3 + .../launch/test_food_segmentation_launch.xml | 7 +- ada_feeding_perception/setup.py | 3 + ada_feeding_perception/test/test_copyright.py | 3 + ada_feeding_perception/test/test_flake8.py | 3 + ada_feeding_perception/test/test_pep257.py | 3 + .../ada_planning_scene/__init__.py | 2 + .../ada_planning_scene/ada_planning_scene.py | 10 +- .../collision_object_manager.py | 7 +- .../ada_planning_scene/helpers.py | 3 + .../planning_scene_initializer.py | 5 +- .../update_from_face_detection.py | 41 ++++--- .../update_from_table_detection.py | 33 ++--- .../ada_planning_scene/workspace_walls.py | 29 +++-- .../config/ada_planning_scene.yaml | 114 +++++++++--------- .../launch/ada_moveit_launch.xml | 5 + ada_planning_scene/setup.py | 3 + ada_planning_scene/test/test_copyright.py | 3 + ada_planning_scene/test/test_flake8.py | 3 + ada_planning_scene/test/test_pep257.py | 3 + nano_bridge/README.md | 2 +- nano_bridge/config/nano_bridge.yaml | 3 + nano_bridge/launch/receiver.launch.xml | 7 +- 
nano_bridge/launch/sender.launch.xml | 5 + nano_bridge/msg/CompressedImage.msg | 2 +- nano_bridge/nano_bridge/__init__.py | 2 + nano_bridge/nano_bridge/helpers.py | 3 + nano_bridge/nano_bridge/receiver.py | 3 + .../nano_bridge/receiver_compressed_image.py | 8 +- nano_bridge/nano_bridge/sender.py | 3 + .../nano_bridge/sender_compressed_image.py | 8 +- requirements.txt | 8 +- run_camera.sh | 2 - run_nano_bridge.sh | 2 - start.py | 2 +- start_nano.py | 2 +- 161 files changed, 975 insertions(+), 380 deletions(-) create mode 100644 .codespell-ignore create mode 100644 .github/LICENSE_HEADER.md create mode 100644 .github/workflows/pre-commit.yaml create mode 100644 .pre-commit-config.yaml create mode 100644 LICENSE.md mode change 100644 => 100755 ada_feeding/tests/test_eventually_swiss.py mode change 100644 => 100755 ada_feeding/tests/test_scoped_behavior.py mode change 100644 => 100755 ada_feeding_action_select/ada_feeding_action_select/policy_service.py mode change 100644 => 100755 ada_feeding_perception/ada_feeding_perception/republisher.py mode change 100644 => 100755 ada_feeding_perception/ada_feeding_perception/segment_from_point.py mode change 100644 => 100755 ada_feeding_perception/ada_feeding_perception/test_segment_from_point.py mode change 100644 => 100755 ada_planning_scene/ada_planning_scene/ada_planning_scene.py mode change 100644 => 100755 start.py diff --git a/.codespell-ignore b/.codespell-ignore new file mode 100644 index 00000000..853a965b --- /dev/null +++ b/.codespell-ignore @@ -0,0 +1,2 @@ +fof +ans diff --git a/.github/LICENSE_HEADER.md b/.github/LICENSE_HEADER.md new file mode 100644 index 00000000..95965d38 --- /dev/null +++ b/.github/LICENSE_HEADER.md @@ -0,0 +1,2 @@ +Copyright (c) 2024, Personal Robotics Laboratory +License: BSD 3-Clause. See LICENSE.md file in root directory. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ee6caf5d..9b1ac4de 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,14 +1,16 @@ # Description -[TODO: describe, in-detail, what issue this PR addresses and how it addresses it. Link to relevant Github Issues.] +\[TODO: describe, in-detail, what issue this PR addresses and how it addresses it. Link to relevant Github Issues.\] # Testing procedure -[TODO: describe, in-detail, how you tested this. The procedure must be detailed enough for the reviewer(s) to recreate it.] +\[TODO: describe, in-detail, how you tested this. The procedure must be detailed enough for the reviewer(s) to recreate it.\] # Before opening a pull request -- [ ] Format your code using [black formatter](https://black.readthedocs.io/en/stable/) `python3 -m black .` -- [ ] Run your code through [pylint](https://pylint.readthedocs.io/en/latest/) and address all warnings/errors. The only warnings that are acceptable to not address is TODOs that should be addressed in a future PR. From the top-level `ada_feeding` directory, run: `pylint --recursive=y --rcfile=.pylintrc .`. + +- \[ \] `pre-commit run --all-files` +- \[ \] Run your code through [pylint](https://pylint.readthedocs.io/en/latest/). `pylint --recursive=y --rcfile=.pylintrc .`. All warnings but `fixme` must be addressed. 
# Before Merging -- [ ] `Squash & Merge` + +- \[ \] `Squash & Merge` diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml new file mode 100644 index 00000000..d71693b4 --- /dev/null +++ b/.github/workflows/pre-commit.yaml @@ -0,0 +1,44 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +name: pre-commit + +on: + push: + branches: + - main + - master + - ros2-devel + pull_request: + workflow_dispatch: + +concurrency: + group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" + cancel-in-progress: true + +env: + PYTHON_VERSION: "3.10" + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + ## Run pre-commit and try to apply fixes + - name: Run pre-commit + uses: pre-commit/action@v3.0.1 + - name: Apply fixes from pre-commit + uses: pre-commit-ci/lite-action@v1.0.2 + if: always() + # - name: Install dependencies + # run: | + # python -m pip install --upgrade pip + # pip install --force-reinstall pylint==3.1.0 + # pip install overrides + # - name: Analysing the code with pylint + # run: | + # pylint --recursive=y --rcfile=.pylintrc . --disable fixme --disable import-error diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..e28a4ad4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,69 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-symlinks + - id: check-xml + - id: check-yaml + - id: debug-statements + - id: destroyed-symlinks + - id: detect-private-key + - id: end-of-file-fixer + - id: mixed-line-ending + - id: requirements-txt-fixer + - id: trailing-whitespace + + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.0 + hooks: + - id: insert-license + args: + - --license-file + - .github/LICENSE_HEADER.md + - --use-current-year + types_or: [python, yaml] + + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.0 + hooks: + - id: insert-license + args: + - --license-file + - .github/LICENSE_HEADER.md + - --use-current-year + - --comment-style + - "" + types_or: [xml] + exclude: ".*package.xml$" + + - repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + + - repo: https://github.com/lovesegfault/beautysh + rev: v6.2.1 + hooks: + - id: beautysh + + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.16 + hooks: + - id: mdformat + args: + - --number + + - repo: https://github.com/codespell-project/codespell + rev: v2.2.4 + hooks: + - id: codespell + args: + - --ignore-words=.codespell-ignore diff --git a/.pylintrc b/.pylintrc index 67d4a481..2a1ad9d3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -28,7 +28,8 @@ clear-cache-post-run=no extension-pkg-allow-list=cv2, cv, lxml, - tf2_py + tf2_py, + rclpy._rclpy_pybind11 # A comma-separated list of package or module names from where C extensions may # be loaded. 
Extensions are loading into the active Python interpreter and may
@@ -37,7 +38,8 @@ extension-pkg-allow-list=cv2,
 extension-pkg-whitelist=cv2,
                         cv,
                         lxml,
-                        tf2_py
+                        tf2_py,
+                        rclpy._rclpy_pybind11

 # Return non-zero exit code if any of these messages/categories are detected,
 # even if score is above --fail-under value. Syntax same as enable. Messages
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 00000000..49d47d54
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,28 @@
+BSD 3-Clause License
+
+Copyright (c) 2024, Personal Robotics Laboratory
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.md b/README.md
index 966771c9..ca8e0735 100644
--- a/README.md
+++ b/README.md
@@ -7,53 +7,71 @@ This README is the definitive source for downloading, installing, and running th
 ### Setup (Robot Software)
 1. [Install ROS2 Humble](https://docs.ros.org/en/humble/Installation.html) (binary packages are recommended over building from source), [configure your environment](https://docs.ros.org/en/humble/Tutorials/Beginner-CLI-Tools/Configuring-ROS2-Environment.html), and [create a workspace](https://docs.ros.org/en/humble/Tutorials/Beginner-Client-Libraries/Creating-A-Workspace/Creating-A-Workspace.html#).
-    1. **NOTE**: In the remainder of the instructions, replace `~\colcon_ws` with the path to your workspace.
+
+   1. **NOTE**: In the remainder of the instructions, replace `~/colcon_ws` with the path to your workspace.
+
 2. Configure [`pr-rosinstalls`](https://github.com/personalrobotics/pr-rosinstalls) in order to download all necessary repositories.
-        cd ~
-        git clone https://github.com/personalrobotics/pr-rosinstalls.git
+   ```
+   cd ~
+   git clone https://github.com/personalrobotics/pr-rosinstalls.git
+   ```
 3. Pull all the repositories, with the correct branch. Replace `` with one of the options, depending on your authentication method.
-        sudo apt install python3-wstool # if not already installed
-        cd ~/colcon_ws/src
-        wstool init
-        wstool merge ~/pr-rosinstalls/ada-feeding..rosinstall
-        wstool up
+   ```
+   sudo apt install python3-wstool # if not already installed
+   cd ~/colcon_ws/src
+   wstool init
+   wstool merge ~/pr-rosinstalls/ada-feeding..rosinstall
+   wstool up
+   ```
 4. Configure [`rosdep`](https://docs.ros.org/en/humble/Tutorials/Intermediate/Rosdep.html):
-        sudo apt install python3-rosdep # if not already installed
-        sudo rosdep init # if this is the first time using rosdep
+   ```
+   sudo apt install python3-rosdep # if not already installed
+   sudo rosdep init # if this is the first time using rosdep
+   ```
 5. (Only for users **with** sudo access) Install [`rosdep`](https://docs.ros.org/en/humble/Tutorials/Intermediate/Rosdep.html) dependencies:
-        rosdep update
-        cd ~/colcon_ws
-        rosdep install --from-paths src -y --ignore-src --as-root=pip:false
+   ```
+   rosdep update
+   cd ~/colcon_ws
+   rosdep install --from-paths src -y --ignore-src --as-root=pip:false
+   ```
 6. Install and fix pip/non-`rosdep` dependencies:
-        cd ~/colcon_ws/src/ada_feeding
-        python3 -m pip install -r requirements.txt
-
-    - Upgrade `transforms3d`, since [the release on Ubuntu packages is outdated](https://github.com/matthew-brett/transforms3d/issues/65): `python3 -m pip install transforms3d -U`
-    - Remove the duplicate matplotlib pip installation caused by installing scikit-spatial with pip (some accounts have required sudo access for this command, other have not. If you do not have sudo access and encounter this, contact a lab member who does):
-
-      ```
-      python3 -m pip uninstall matplotlib
-      ```
-
-    - [`pyrealsense2` is not released for ARM systems](https://github.com/IntelRealSense/librealsense/issues/6449#issuecomment-650784066), so ARM users will have to [build from source](https://github.com/IntelRealSense/librealsense/blob/master/wrappers/python/readme.md#building-from-source). You may have to add the `-DPYTHON_EXECUTABLE=/usr/bin/python3` flag to the `cmake` command. When running `sudo make install`, pay close attention to which path `pyrealsense2` is installed to and add *that path* to the `PYTHONPATH` -- it should be `/use/local/lib` but may be `/usr/local/OFF`.
-
+   ```
+   cd ~/colcon_ws/src/ada_feeding
+   python3 -m pip install -r requirements.txt
+   ```
+
+   - Upgrade `transforms3d`, since [the release on Ubuntu packages is outdated](https://github.com/matthew-brett/transforms3d/issues/65): `python3 -m pip install transforms3d -U`
+
+   - Remove the duplicate matplotlib pip installation caused by installing scikit-spatial with pip (some accounts have required sudo access for this command, others have not. If you do not have sudo access and encounter this, contact a lab member who does):
+
+     ```
+     python3 -m pip uninstall matplotlib
+     ```
+
+   - [`pyrealsense2` is not released for ARM systems](https://github.com/IntelRealSense/librealsense/issues/6449#issuecomment-650784066), so ARM users will have to [build from source](https://github.com/IntelRealSense/librealsense/blob/master/wrappers/python/readme.md#building-from-source). You may have to add the `-DPYTHON_EXECUTABLE=/usr/bin/python3` flag to the `cmake` command. When running `sudo make install`, pay close attention to which path `pyrealsense2` is installed to and add *that path* to the `PYTHONPATH` -- it should be `/usr/local/lib` but may be `/usr/local/OFF`.
+
 7. Install the JACO SDK (real robot only). All SDKs are listed [here](https://www.kinovarobotics.com/resources?r=79301&s); PRL currently uses the [Gen2 SDK v1.5.1](https://drive.google.com/file/d/1UEQAow0XLcVcPCeQfHK9ERBihOCclkJ9/view). Note that although the latest version of that SDK is for Ubuntu 16.04, it still works on Ubuntu 22.04 (only for x86 systems, not ARM systems).
+
 8. (Optional): We recommend using CycloneDDS as your ROS2 middleware, with a custom configuration file that enables multicast on loopback and prioritizes loopback. Install it with `sudo apt install ros-humble-rmw-cyclonedds-cpp`. Then, add the following lines to your `~/.bashrc`: `export RMW_IMPLEMENTATION=rmw_cyclonedds_cpp; export CYCLONEDDS_URI=~/colcon_ws/src/ada_feeding/cyclonedds.xml`.
-    - Note that you may have to change the name of the fallback interface that CycloneDDS uses if `lo` does not work for that transmission. To do so, run `ifconfig` and either use the name of your ethernet network or WiFi network, depending on how your computer is connected to the internet.
-    - Note that you have to [rebuild your workspace from scratch after doing this](https://docs.ros.org/en/humble/How-To-Guides/Working-with-multiple-RMW-implementations.html#adding-rmw-implementations-to-your-workspace).
+
+   - Note that you may have to change the name of the fallback interface that CycloneDDS uses if `lo` does not work for that transmission. To do so, run `ifconfig` and either use the name of your ethernet network or WiFi network, depending on how your computer is connected to the internet.
+   - Note that you have to [rebuild your workspace from scratch after doing this](https://docs.ros.org/en/humble/How-To-Guides/Working-with-multiple-RMW-implementations.html#adding-rmw-implementations-to-your-workspace).
+
 9. Build your workspace:
-        cd ~/colcon_ws
-        colcon build --symlink-install # if sim-only, add '--packages-skip ada_hardware'
+   ```
+   cd ~/colcon_ws
+   colcon build --symlink-install # if sim-only, add '--packages-skip ada_hardware'
+   ```

 ### Setup (System Configuration)

@@ -68,31 +86,39 @@ If you change the e-stop button, the e-stop button's adapter(s), and/or the devi
 ### Setup (Web App)

 1. Install the Node Version Manager (nvm): https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script
+
 2. Install and use NodeJS 21 (Note: if you have just installed nvm using the previous command, you will need to source your .bashrc or open a new terminal to run these commands):
-        nvm install 21
-        nvm use 21
+   ```
+   nvm install 21
+   nvm use 21
+   ```
 3. (Only for users **with** sudo access; this should already be configured on PRL computers) Make Node available to all users, including root:
-        sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/node" "/usr/local/bin/node"
-        sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/npm" "/usr/local/bin/npm"
-        sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/npx" "/usr/local/bin/npx"
+   ```
+   sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/node" "/usr/local/bin/node"
+   sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/npm" "/usr/local/bin/npm"
+   sudo ln -s "$NVM_DIR/versions/node/$(nvm version)/bin/npx" "/usr/local/bin/npx"
+   ```
 4. (Only for users **with** sudo access; this should already be configured on PRL computers) Install `serve` and `pm2` globally. Root access is necessary for `serve` so it can access port 80.
-        sudo npm install -g serve
-        npm install -g pm2@latest
+   ```
+   sudo npm install -g serve
+   npm install -g pm2@latest
+   ```
 5. Install the web app dependencies.
(Note: there will be some vulnerabilities in dependencies. This is okay, since access to the web app is shielded behind our secure router.) - cd ~/colcon_ws/src/feeding_web_interface/feedingwebapp - npm install --legacy-peer-deps - npx playwright install + ``` + cd ~/colcon_ws/src/feeding_web_interface/feedingwebapp + npm install --legacy-peer-deps + npx playwright install + ``` 6. (Optional; this should already be configured on PRL computers) To access the web app on a device other than the one hosting it, enable the desired port for HTTP access: https://www.digitalocean.com/community/tutorials/ufw-essentials-common-firewall-rules-and-commands#allow-all-incoming-http-port-80 - ## Running the Software We use the convenience script `start.py` to launch the software. This script has several command-line arguments, which can be seen by passing the `-h` flag when running the script. @@ -112,7 +138,6 @@ In a browser, access `127.0.0.1` (if on the same device serving the web app), or To close, run `python3 src/ada_feeding/start.py -c` - ### Option B: Running Web App With the Mock Robot This option starts the web app, runs dummy nodes for perception, runs the **real** robot motion code, but runs a mock robot in MoveIt. This is useful for testing robot motion code in simulation before moving onto the real robot. This will start the web app on port `3000` and does not require `sudo` access. @@ -143,7 +168,6 @@ To close, run `python3 src/ada_feeding/start.py --sim dummy -c` - **Something is not working, what do I do?**: All our code runs in `screen` sessions, so the first step is to attach to individual screen sessions to identify where the issue is. Run `screen -ls` to list all screens, run `screen -r ` to attach to a screen session, and `Ctrl-A d` to detach from the screen session. An introduction to `screen` can be found [here](https://astrobiomike.github.io/unix/screen-intro). - **The watchdog is not recognizing my initial e-stop click**: Verify the e-stop is plugged in, and that any other processes accessing the microphone (e.g., Sound Settings) are closed. Run `alsamixer` to see if your user account has access to it. If you do not see sound levels in the terminal, try `sudo alsamixer`. If that works, give your user account permission to access sound: `sudo setfacl -m u:$USER:rw /dev/snd/*` -- **The watchdog is failing due to the F/T sensor**: First, check whether the force-torque sensor is publishing: `ros2 topic echo /wireless_ft/ftSensor3`. If not, the issue is in the `ft` screen (one potential issue is that the alias `ft-sensor` does not point to the right IP address for the force-torque sensor, in which case you should pass the IP address in using the `host` parameter). If so, check the timestamp of the published F/T messages compared to the current time. If it is off, the problem is that NTP got disabled on the force-torque sensor. You have to use `minicom` to re-enable NTP (see [here](https://github.com/personalrobotics/pr_docs/wiki/ADA) for PRL-specific instructions). +- **The watchdog is failing due to the F/T sensor**: First, check whether the force-torque sensor is publishing: `ros2 topic echo /wireless_ft/ftSensor3`. If not, the issue is in the `ft` screen (one potential issue is that the alias `ft-sensor` does not point to the right IP address for the force-torque sensor, in which case you should pass the IP address in using the `host` parameter). If so, check the timestamp of the published F/T messages compared to the current time. 
If it is off, the problem is that NTP got disabled on the force-torque sensor. You have to use `minicom` to re-enable NTP (see [here](https://github.com/personalrobotics/pr_docs/wiki/ADA) for PRL-specific instructions). - **I get the error `cannot use destroyable because destruction was requested`**: Upgrade to the latest version of`rclpy`. -- **I get the error `rosbags is not installed, or a wrong version is installed (needs 0.9.19). Type Checking against rosbag types will not work. Error: No module named 'rosbags'`**: Upgrade or downgrade to `rosbags` version 0.9.19. - **I get the error `bad option: --env-file=.env` when launching the WebRTC Signalling Server**: You are using an old version of Node; upgrade it to 21. diff --git a/ada_feeding/README.md b/ada_feeding/README.md index 97f8c104..89abd947 100644 --- a/ada_feeding/README.md +++ b/ada_feeding/README.md @@ -7,55 +7,58 @@ The `ada_feeding` package contains the overarching code to run the robot-assiste See the [`ada_feeding` top-level README for setup instructions](https://github.com/personalrobotics/ada_feeding/blob/ros2-devel/README.md). ## Usage + 1. Build your workspace: `colcon build` 2. Source your workspace: `source install/setup.bash` 3. Launch the force-torque sensor: - 1. Dummy node: `ros2 run ada_feeding dummy_ft_sensor.py` - 2. Real node: Follow the instructions in the [`forque_sensor_hardware` README](https://github.com/personalrobotics/forque_sensor_hardware/blob/main/README.md). Note that this is in the _main_ branch, which may not be the default branch. + 1. Dummy node: `ros2 run ada_feeding dummy_ft_sensor.py` + 2. Real node: Follow the instructions in the [`forque_sensor_hardware` README](https://github.com/personalrobotics/forque_sensor_hardware/blob/main/README.md). Note that this is in the _main_ branch, which may not be the default branch. 4. Launch the RealSense & Perception: - 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_motion:=false run_web_bridge:=false` - 2. Real nodes: Follow the instructions in the [`ada_feeding_perception` README](https://github.com/personalrobotics/ada_feeding/blob/ros2-devel/ada_feeding_perception/README.md#usage) + 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_motion:=false run_web_bridge:=false` + 2. Real nodes: Follow the instructions in the [`ada_feeding_perception` README](https://github.com/personalrobotics/ada_feeding/blob/ros2-devel/ada_feeding_perception/README.md#usage) 5. Run the action servers: - 1. If the e-stop is plugged in: `ros2 launch ada_feeding ada_feeding_launch.xml` - 2. If the e-stop is not plugged in: `ros2 launch ada_feeding ada_feeding_launch.xml use_estop:=false` + 1. If the e-stop is plugged in: `ros2 launch ada_feeding ada_feeding_launch.xml` + 2. If the e-stop is not plugged in: `ros2 launch ada_feeding ada_feeding_launch.xml use_estop:=false` 6. Launch MoveIt2: - 1. Sim (RVIZ): `ros2 launch ada_moveit demo_feeding.launch.py sim:=mock` - 2. Real Robot: `ros2 launch ada_moveit demo_feeding.launch.py` + 1. Sim (RVIZ): `ros2 launch ada_moveit demo_feeding.launch.py sim:=mock` + 2. Real Robot: `ros2 launch ada_moveit demo_feeding.launch.py` 7. Test it: - 1. Test the individual actions with the command line interface: - 1. `ros2 action send_goal /MoveAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` - 2. 
`ros2 action send_goal /AcquireFood ada_feeding_msgs/action/AcquireFood "{header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, detected_food: {roi: {x_offset: 0, y_offset: 0, height: 0, width: 0, do_rectify: false}, mask: {header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, format: '', data: []}, item_id: '', confidence: 0.0}}" --feedback` - 3. `ros2 action send_goal /MoveToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` - 4. `ros2 action send_goal /MoveToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback` - 5. `ros2 action send_goal /MoveToMouth ada_feeding_msgs/action/MoveToMouth "{}" --feedback` - 6. `ros2 action send_goal /MoveFromMouth ada_feeding_msgs/action/MoveTo "{}" --feedback` - 7. `ros2 action send_goal /MoveToStowLocation ada_feeding_msgs/action/MoveTo "{}" --feedback` - 2. Test the individual actions with the web app: - 1. Launch the web app ([instructions here](https://github.com/personalrobotics/feeding_web_interface/tree/main/feedingwebapp)) - 2. Go through the web app, ensure the expected actions happen on the robot. - 3. Test the watchdog in isolation: - 1. Echo the watchdog topic: `ros2 topic echo /ada_watchdog` - 2. Induce errors in the force-torque sensor and verify the watchdog reacts appropiately. Errors could include: - - Terminating the node. - - Disconnecting the physical force-torque sensor from power. - - Inducing corruption by causing the dummy note to output zero-variance values: `ros2 param set /dummy_ft_sensor std '[0.1, 0.1, 0.0, 0.1, 0.1, 0.1]'` - 4. Test the watchdog with the action servers: - 1. Start an action (see above), induce errors in the force-torque sensor (see above), and ensure the action gets aborted. - 2. While there are errors in the force-torque sensor, ensure no new goals get accepted. - 3. Terminate the watchdog node and ensure in-progress actions get aborted and incoming goals get rejected. - 4. Launch the action servers without the watchdog node running and ensure it rejects all goals. + 1. Test the individual actions with the command line interface: + 1. `ros2 action send_goal /MoveAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` + 2. `ros2 action send_goal /AcquireFood ada_feeding_msgs/action/AcquireFood "{header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, detected_food: {roi: {x_offset: 0, y_offset: 0, height: 0, width: 0, do_rectify: false}, mask: {header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, format: '', data: []}, item_id: '', confidence: 0.0}}" --feedback` + 3. `ros2 action send_goal /MoveToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` + 4. `ros2 action send_goal /MoveToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback` + 5. `ros2 action send_goal /MoveToMouth ada_feeding_msgs/action/MoveToMouth "{}" --feedback` + 6. `ros2 action send_goal /MoveFromMouth ada_feeding_msgs/action/MoveTo "{}" --feedback` + 7. `ros2 action send_goal /MoveToStowLocation ada_feeding_msgs/action/MoveTo "{}" --feedback` + 2. Test the individual actions with the web app: + 1. Launch the web app ([instructions here](https://github.com/personalrobotics/feeding_web_interface/tree/main/feedingwebapp)) + 2. Go through the web app, ensure the expected actions happen on the robot. + 3. Test the watchdog in isolation: + 1. Echo the watchdog topic: `ros2 topic echo /ada_watchdog` + 2. Induce errors in the force-torque sensor and verify the watchdog reacts appropriately. Errors could include: + - Terminating the node. 
+      - Disconnecting the physical force-torque sensor from power.
+      - Inducing corruption by causing the dummy node to output zero-variance values: `ros2 param set /dummy_ft_sensor std '[0.1, 0.1, 0.0, 0.1, 0.1, 0.1]'`
+   4. Test the watchdog with the action servers:
+      1. Start an action (see above), induce errors in the force-torque sensor (see above), and ensure the action gets aborted.
+      2. While there are errors in the force-torque sensor, ensure no new goals get accepted.
+      3. Terminate the watchdog node and ensure in-progress actions get aborted and incoming goals get rejected.
+      4. Launch the action servers without the watchdog node running and ensure it rejects all goals.

 ## Writing Behavior Trees That Can Be Wrapped Into Action Servers

 ### Subclassing `ActionServerBT`

 In order to wrap a behavior tree into an action server, you have to subclass `ActionServerBT`. Your subclass must implement the following functions:
+
 - `create_tree`: Create the tree. This is called **once**, sometime before the first call to this action server begins executing.
 - `send_goal`: Take a goal from the action server and send it to the tree. This is called **once per goal** that is sent to this action server.
 - `get_feedback`: Return the feedback type that the ROS2 action is expecting, by examining the tree's current state. This is called **once per action server iteration** (which is the same as being called once per `tree.tick()`).
 - `get_result`: Return the result type that the ROS2 action is expecting, by examining the tree's current state. This is called **once per goal**, after the tree's status has switched away from `RUNNING`.

 Note the following design paradigms when writing this subclass of `ActionServerBT`:
+
 - None of the above functions should be blocking.
 - All communication between this file and the behavior tree should take place through the blackboard.
 - Only this file should need to import the ROS action type for the action server. The specific behaviors in the tree should not need access to that information.
@@ -66,6 +69,7 @@
 All robot motion actions publish the same feedback and result (see [`ada_feeding_msgs`](https://github.com/personalrobotics/ada_feeding/tree/amaln/goal_constraints/ada_feeding_msgs/action)). Hence, the shared logic of publishing that feedback and result is implemented in the `MoveToTree`. Any tree that publishes that feedback and result should subclass `MoveToTree` and implement the abstract function `create_move_to_tree`.

 ### Creating Robot Motions with Arbitrary Goal and Path Constraints
+
 Robot motion is implemented as a behavior, `MoveTo`. By itself, the behavior will do nothing, since no goal or path constraints will be set. Goal and path constraints are implemented as decorators on top of the `MoveTo` behavior, that can be chained together in order to have multiple constraints. See the already implemented trees (e.g., `MoveToConfiguration`, `MoveToConfigurationWithPosePathConstraints`) for examples.

 ### Updating the Config File
@@ -74,11 +78,11 @@ Then, to execute the behavior tree in `create_action_servers.py`, you have to mo
 - `server_names` (string array, required): A list of the names of the action servers to create.
- For each server name: - - `action_type` (string, required): the ROS action type associated with this action, e.g., `ada_feeding_msgs.action.MoveTo` - - `tree_class` (string, required): the subclass of `ActionServerBT` that creates the tree and provides functions to interface with the action server, e.g., `ada_feeding.trees.MoveAbovePlate` - - `tree_kws` (string array, optional, default `[]`): a list with the keywords for custom arguments to pass to the initialization function of the `tree_class`. - - `tree_kwargs` (dict, optional, default `{}`): a dictionary of keyword arguments to pass to the initialization function of the `tree_class`. - - `tick_rate` (int, optional, default `30`): the frequency (Hz) at which the action server should tick the tree. + - `action_type` (string, required): the ROS action type associated with this action, e.g., `ada_feeding_msgs.action.MoveTo` + - `tree_class` (string, required): the subclass of `ActionServerBT` that creates the tree and provides functions to interface with the action server, e.g., `ada_feeding.trees.MoveAbovePlate` + - `tree_kws` (string array, optional, default `[]`): a list with the keywords for custom arguments to pass to the initialization function of the `tree_class`. + - `tree_kwargs` (dict, optional, default `{}`): a dictionary of keyword arguments to pass to the initialization function of the `tree_class`. + - `tick_rate` (int, optional, default `30`): the frequency (Hz) at which the action server should tick the tree. Once you've made the above changes, you should be good to go. Rebuild your workspace and run the launchfile! diff --git a/ada_feeding/ada_feeding/__init__.py b/ada_feeding/ada_feeding/__init__.py index 03e5d50e..64f93d37 100644 --- a/ada_feeding/ada_feeding/__init__.py +++ b/ada_feeding/ada_feeding/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains code that links the ROS2 action server to py_trees. diff --git a/ada_feeding/ada_feeding/action_server_bt.py b/ada_feeding/ada_feeding/action_server_bt.py index 8d245eae..92d55335 100644 --- a/ada_feeding/ada_feeding/action_server_bt.py +++ b/ada_feeding/ada_feeding/action_server_bt.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines ActionServerBT, an abstract class that links ROS2 action servers with py_trees. @@ -65,7 +68,7 @@ def preempt_goal(self, tree: py_trees.trees.BehaviourTree) -> bool: The default behavior of this function calls the `stop` method on the root of the behavior tree. This should block until all nodes of the - behavior tree have succesfully terminated. Subclasses can override this + behavior tree have successfully terminated. Subclasses can override this method if they want to implement a different behavior. Parameters diff --git a/ada_feeding/ada_feeding/behaviors/__init__.py b/ada_feeding/ada_feeding/behaviors/__init__.py index b8e1365a..17bdf988 100644 --- a/ada_feeding/ada_feeding/behaviors/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom py_tree behaviors for the Ada Feeding project. 
""" diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py b/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py index 5180a8ca..4abb9369 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This subpackage contains custom py_tree behaviors for Food Acquisition. """ diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py index 97015782..73174412 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines behaviors that take an AcquisitionSchema.msg object (or return from AcquisitionSelect.srv) and computes the outputs diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py index baf31135..1bba4f09 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ComputeFoodFrame behavior, which computes the food frame from the Mask provided from a perception algorithm. @@ -295,7 +297,7 @@ def update(self) -> py_trees.common.Status: full_contours = np.vstack(contours) rect = cv.minAreaRect(full_contours) points = cv.boxPoints(rect) - # Get direction of +X axix in pixel-space + # Get direction of +X axis in pixel-space # Take longest side if np.linalg.norm(points[0] - points[1]) > np.linalg.norm( points[1] - points[2] @@ -336,7 +338,7 @@ def update(self) -> py_trees.common.Status: x_unit.vector, x_pos.vector ) - # # If you need to send a fixed food frame to the robot arm, e.g., to + # # If you need to send a fixed food frame to the robot arm, e.g., to # # debug off-centering issues, uncomment this and modify the translation. # deg = 90 # fork roll # world_to_food_transform.transform.translation.x = 0.26262263022586224 diff --git a/ada_feeding/ada_feeding/behaviors/blackboard_behavior.py b/ada_feeding/ada_feeding/behaviors/blackboard_behavior.py index c9dd4637..1d41e386 100644 --- a/ada_feeding/ada_feeding/behaviors/blackboard_behavior.py +++ b/ada_feeding/ada_feeding/behaviors/blackboard_behavior.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
+ """ Generic behavior with additional functionality for blackboard remappings (inspired by BehaviorTree.CPP) diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py b/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py index c7ec8073..a38d447a 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This subpackage contains custom py_tree behaviors for MoveIt2 """ diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py b/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py index 59ffc05e..17d14b42 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ModifyCollisionObject behavior, which adds, moves, or removes a collision object in MoveIt's planning scene. diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py index 74c620c0..551514d9 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines behaviors that package individual blackboard keys / constants into a dictionary diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py index 105e6a2c..57a02b76 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_execute.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveIt2Excute behavior, which uses pymoveit2 to execute and JointTrajectory. diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py index 0d469d1c..200129bf 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_plan.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveIt2Plan behavior, which uses pymoveit2 to plan a path using the provided path and goal constraints. 
@@ -792,8 +794,9 @@ def visualize_trajectory(action_name: str, traj: JointTrajectory) -> None: # pylint: disable=import-outside-toplevel # No need to import graphing libraries if we aren't saving the trajectory import csv - from ament_index_python.packages import get_package_share_directory + import matplotlib.pyplot as plt + from ament_index_python.packages import get_package_share_directory # Get the filepath, excluding the extension file_dir = os.path.join( diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/servo_move.py b/ada_feeding/ada_feeding/behaviors/moveit2/servo_move.py index 2521a83c..24ffca7a 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/servo_move.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/servo_move.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ServoMove behavior, which publishes a Twist to the Moveit2 Servo node. diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py b/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py index e453371b..725e078e 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ToggleCollisionObject behavior, which (dis)allows collisions between the robot and a collision object already in diff --git a/ada_feeding/ada_feeding/behaviors/ros/__init__.py b/ada_feeding/ada_feeding/behaviors/ros/__init__.py index 80268fc1..6a0f8487 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/ros/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom py_tree behaviors for interacting with ROS. """ diff --git a/ada_feeding/ada_feeding/behaviors/ros/msgs.py b/ada_feeding/ada_feeding/behaviors/ros/msgs.py index 8a8830fe..29652d31 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/msgs.py +++ b/ada_feeding/ada_feeding/behaviors/ros/msgs.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines utility behaviors for interacting with and updating ROS messages. diff --git a/ada_feeding/ada_feeding/behaviors/ros/tf.py b/ada_feeding/ada_feeding/behaviors/ros/tf.py index 75e427d6..f1c5dce7 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/tf.py +++ b/ada_feeding/ada_feeding/behaviors/ros/tf.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines utility behaviors for interacting with the TF tree. 
""" diff --git a/ada_feeding/ada_feeding/behaviors/ros/time.py b/ada_feeding/ada_feeding/behaviors/ros/time.py index 4b9f16c0..4b999e3b 100644 --- a/ada_feeding/ada_feeding/behaviors/ros/time.py +++ b/ada_feeding/ada_feeding/behaviors/ros/time.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines utility behaviors related to ROS time. """ diff --git a/ada_feeding/ada_feeding/behaviors/transfer/__init__.py b/ada_feeding/ada_feeding/behaviors/transfer/__init__.py index 20aa3c8d..d08c805c 100644 --- a/ada_feeding/ada_feeding/behaviors/transfer/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/transfer/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This subpackage contains custom py_tree behaviors for Bite Transfer. """ diff --git a/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py b/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py index 44bb8e75..468370f1 100644 --- a/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py +++ b/ada_feeding/ada_feeding/behaviors/transfer/compute_mouth_frame.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ComputeMouthFrame behavior, which takes in the PointStamped containing the (x, y, z) position of the mouth in the camera frame, and returns the diff --git a/ada_feeding/ada_feeding/decorators/__init__.py b/ada_feeding/ada_feeding/decorators/__init__.py index 4d097a0e..1f2eaad6 100644 --- a/ada_feeding/ada_feeding/decorators/__init__.py +++ b/ada_feeding/ada_feeding/decorators/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom py_tree decorators for the Ada Feeding project. """ diff --git a/ada_feeding/ada_feeding/decorators/on_preempt.py b/ada_feeding/ada_feeding/decorators/on_preempt.py index 508183b0..25babc49 100644 --- a/ada_feeding/ada_feeding/decorators/on_preempt.py +++ b/ada_feeding/ada_feeding/decorators/on_preempt.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ NOTE: This is a multi-tick version of the decorator discussed in https://github.com/splintered-reality/py_trees/pull/427 . Once a diff --git a/ada_feeding/ada_feeding/decorators/timeout_from_blackboard.py b/ada_feeding/ada_feeding/decorators/timeout_from_blackboard.py index 8e118257..b174bcb8 100644 --- a/ada_feeding/ada_feeding/decorators/timeout_from_blackboard.py +++ b/ada_feeding/ada_feeding/decorators/timeout_from_blackboard.py @@ -1,6 +1,9 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module extends the Timeout decorator -to accept a blackboard namespace and +to accept a blackboard namespace and """ # Standard imports diff --git a/ada_feeding/ada_feeding/helpers.py b/ada_feeding/ada_feeding/helpers.py index 1dba6868..b045d98a 100644 --- a/ada_feeding/ada_feeding/helpers.py +++ b/ada_feeding/ada_feeding/helpers.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. 
See LICENSE.md file in root directory. + """ This module defines a number of helper functions that are reused throughout the Ada Feeding project. diff --git a/ada_feeding/ada_feeding/idioms/__init__.py b/ada_feeding/ada_feeding/idioms/__init__.py index e7fb0d2a..5a387cd9 100644 --- a/ada_feeding/ada_feeding/idioms/__init__.py +++ b/ada_feeding/ada_feeding/idioms/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom idioms that are used in the Ada Feeding project. diff --git a/ada_feeding/ada_feeding/idioms/bite_transfer.py b/ada_feeding/ada_feeding/idioms/bite_transfer.py index 4c7c58fc..6a74136f 100644 --- a/ada_feeding/ada_feeding/idioms/bite_transfer.py +++ b/ada_feeding/ada_feeding/idioms/bite_transfer.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains helper functions to generate the behaviors used in bite transfer, e.g., the MoveToMouth and MoveFromMouth behaviors. diff --git a/ada_feeding/ada_feeding/idioms/eventually_swiss.py b/ada_feeding/ada_feeding/idioms/eventually_swiss.py index bbbba6a7..bd44fe16 100644 --- a/ada_feeding/ada_feeding/idioms/eventually_swiss.py +++ b/ada_feeding/ada_feeding/idioms/eventually_swiss.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ NOTE: This is a preempt-handling version of the idiom discussed in https://github.com/splintered-reality/py_trees/pull/427 . Once a @@ -5,10 +8,13 @@ idiom should be removed in favor of the main py_trees one. """ +# Standard imports import typing +# Third-party imports from py_trees import behaviour, behaviours, composites +# Local imports from ada_feeding.decorators import OnPreempt @@ -29,7 +35,7 @@ def eventually_swiss( This is a swiss knife version of the eventually idiom that facilitates a multi-tick response for specialised handling work sequence's completion status. Specifically, this idiom - guarentees the following: + guarantees the following: 1. The on_success behaviour is ticked only if the workers all return SUCCESS. 2. The on_failure behaviour is ticked only if at least one worker returns FAILURE. 3. The on_preempt behaviour is ticked only if `stop(INVALID)` is called on the diff --git a/ada_feeding/ada_feeding/idioms/ft_thresh_utils.py b/ada_feeding/ada_feeding/idioms/ft_thresh_utils.py index 955b14c8..81258dc3 100644 --- a/ada_feeding/ada_feeding/idioms/ft_thresh_utils.py +++ b/ada_feeding/ada_feeding/idioms/ft_thresh_utils.py @@ -1,7 +1,9 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ -This module defines the ft_thresh_satisifed idiom. +This module defines the ft_thresh_satisfied idiom. Which subscribes to the FT sensor and detects whether a given FT threshold is satisfied at the point of execution. """ @@ -32,7 +34,7 @@ def ft_thresh_satisfied( ) -> py_trees.behaviour.Behaviour: """ Returns a behavior that subscribes to the FT sensor and checks whether threshold - is immediately satisifed. FAILURE if not, SUCCESS if so. RUNNING while waiting + is immediately satisfied. FAILURE if not, SUCCESS if so. RUNNING while waiting for force/torque message. 
Parameters diff --git a/ada_feeding/ada_feeding/idioms/pre_moveto_config.py b/ada_feeding/ada_feeding/idioms/pre_moveto_config.py index 82513e2f..00055f51 100644 --- a/ada_feeding/ada_feeding/idioms/pre_moveto_config.py +++ b/ada_feeding/ada_feeding/idioms/pre_moveto_config.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the pre_moveto_config idiom, which returns a behavior that calls the ROS services that should be called before any MoveTo behavior. @@ -196,7 +198,7 @@ def pre_moveto_config( operator=partial( add_logging_to_operator, operator_fn=operator.eq, - success_msg="Succesfully retared F/T sensor", + success_msg="Successfully retared F/T sensor", failure_msg="Failed to retare F/T sensor", ), ) @@ -238,7 +240,7 @@ def pre_moveto_config( operator=partial( add_logging_to_operator, operator_fn=set_parameter_response_all_success, - success_msg="Succesfully set F/T threshold", + success_msg="Successfully set F/T threshold", failure_msg="Failed to set F/T threshold", ), ) diff --git a/ada_feeding/ada_feeding/idioms/retry_call_ros_service.py b/ada_feeding/ada_feeding/idioms/retry_call_ros_service.py index 4cdaeda0..6bc1d0f2 100644 --- a/ada_feeding/ada_feeding/idioms/retry_call_ros_service.py +++ b/ada_feeding/ada_feeding/idioms/retry_call_ros_service.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the retry_call_ros_service idiom, which will call a ROS service, optionally check the response value, and retry up to a pre-specified @@ -28,7 +30,7 @@ def retry_call_ros_service( server_execution_timeout_sec: float = 10.0, ) -> py_trees.behaviour.Behaviour: """ - Creates a behavior that calls a ROS service and optionally checkes the response. + Creates a behavior that calls a ROS service and optionally checks the response. If there is a failure for any kind (e.g., service is unavailable, doesn't return, response does not pass the check, etc.) it retries up to `max_retries` times. diff --git a/ada_feeding/ada_feeding/idioms/scoped_behavior.py b/ada_feeding/ada_feeding/idioms/scoped_behavior.py index 94d666a0..ca6a90b4 100644 --- a/ada_feeding/ada_feeding/idioms/scoped_behavior.py +++ b/ada_feeding/ada_feeding/idioms/scoped_behavior.py @@ -1,10 +1,13 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the `scoped_behavior` idiom, which is a way to run a main behavior within the scope of a pre and post behavior. In expected usage, the pre behavior will open or create a resources, the main behavior will use those resources, and the post behavior will close or delete the -resources. The idiom guarentees the following: +resources. The idiom guarantees the following: 1. The main behavior will not be ticked unless the pre behavior returns SUCCESS. 2. 
The behavior returned by this idiom will not reach a terminal (non-RUNNING) diff --git a/ada_feeding/ada_feeding/idioms/servo_until.py b/ada_feeding/ada_feeding/idioms/servo_until.py index f34078c6..f2b10db7 100644 --- a/ada_feeding/ada_feeding/idioms/servo_until.py +++ b/ada_feeding/ada_feeding/idioms/servo_until.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the servo_until idiom, which takes in a condition behavior that should never return RUNNING, and an instance of the ServoMove behavior and @@ -44,7 +46,6 @@ ) from ada_feeding.helpers import BlackboardKey - # Hardcoded parts of the behavior names that compute the distance for Servoing. # This is used by MoveToVisitor to populate feedback. Try to make these as # unique as possible to avoid clashes. @@ -60,8 +61,8 @@ def servo_until( servo_inputs: Dict[str, Union[BlackboardKey, Any]], ) -> py_trees.behaviour.Behaviour: """ - An idiom to implement servoing until a condition is met. Sepcifically, this - behavior wll iteratively sense the world, check if a condition is met, if so + An idiom to implement servoing until a condition is met. Specifically, this + behavior will iteratively sense the world, check if a condition is met, if so return SUCCESS, else compute a twist and send it to the servo controller. Parameters @@ -305,7 +306,7 @@ def servo_until_pose( # Check whether the ee_to_target_pose_stamped is within the tolerances. # Note that this behavior can technically return FAILURE for a reason # other than the condition not being met, specifically if the blackboard - # variable doens't exist or the permissions are not correctly set. + # variable doesn't exist or the permissions are not correctly set. # Therefore, it is crucial to be vigilant when testing this idiom. check_behavior = py_trees.behaviours.CheckBlackboardVariableValue( name=f"{name} ServoUntilPose CheckTolerances", diff --git a/ada_feeding/ada_feeding/idioms/wait_for_secs.py b/ada_feeding/ada_feeding/idioms/wait_for_secs.py index 45a426fc..3fc03cdf 100644 --- a/ada_feeding/ada_feeding/idioms/wait_for_secs.py +++ b/ada_feeding/ada_feeding/idioms/wait_for_secs.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the wait_for_secs idiom, which returns a behavior that waits for a specified number of seconds. diff --git a/ada_feeding/ada_feeding/trees/__init__.py b/ada_feeding/ada_feeding/trees/__init__.py index ab6c1c8c..39c6e1c2 100644 --- a/ada_feeding/ada_feeding/trees/__init__.py +++ b/ada_feeding/ada_feeding/trees/__init__.py @@ -1,7 +1,10 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom behavior trees that are used in the Ada Feeding project. All of these trees implement the ActionServerBT interface, in order -to be wrapped in a ROS action server. +to be wrapped in a ROS action server. 
""" # pylint: disable=cyclic-import diff --git a/ada_feeding/ada_feeding/trees/acquire_food_tree.py b/ada_feeding/ada_feeding/trees/acquire_food_tree.py index 293b4325..f0cc4153 100644 --- a/ada_feeding/ada_feeding/trees/acquire_food_tree.py +++ b/ada_feeding/ada_feeding/trees/acquire_food_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the AcquireFood behavior tree and provides functions to wrap that behavior tree in a ROS2 action server. @@ -57,9 +59,13 @@ from ada_feeding.trees import MoveToTree, StartServoTree, StopServoTree +# pylint: disable=too-many-lines +# This tree is the cruz of bite acquisition, hence is long. + + class AcquireFoodTree(MoveToTree): """ - A behvaior tree to select and execute an acquisition + A behaviour tree to select and execute an acquisition action (see ada_feeding_msgs.action.AcquisitionSchema) for a given food mask in ada_feeding_msgs.action.AcquireFood. @@ -527,7 +533,7 @@ def move_above_plan( name="Success", # Set Approach F/T Thresh pre_behavior=( - Success() + Success() # pylint: disable=abstract-class-instantiated # ToggleCollisionObject( # name="AllowTable", # ns=name, @@ -538,7 +544,7 @@ def move_above_plan( # ) ), post_behavior=( - Success() + Success() # pylint: disable=abstract-class-instantiated # ToggleCollisionObject( # name="DisallowTable", # ns=name, diff --git a/ada_feeding/ada_feeding/trees/activate_controller.py b/ada_feeding/ada_feeding/trees/activate_controller.py index 3ca534ee..6cc588c9 100644 --- a/ada_feeding/ada_feeding/trees/activate_controller.py +++ b/ada_feeding/ada_feeding/trees/activate_controller.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the ActivateController behavior tree, which activates a user-specified controller and deactivates all others. @@ -43,7 +45,7 @@ def __init__( ---------- node: The ROS node. controller_to_activate: The name of the controller to activate. If None, - deactive all controllers without activating any. + deactivate all controllers without activating any. all_controller_names: The names of all controllers. If None, the default controllers are "jaco_arm_cartesian_controller", "jaco_arm_controller", and "jaco_arm_servo_controller" diff --git a/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py index 711b868e..4ae6c17c 100644 --- a/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveFromMouthTree behaviour tree and provides functions to wrap that behaviour tree in a ROS2 action server. 
@@ -356,8 +358,7 @@ def speed(post_stamped: PoseStamped) -> Tuple[float, float]: + post_stamped.pose.position.y**2.0 + post_stamped.pose.position.z**2.0 ) ** 0.5 - if pose_distance > max_pose_distance: - max_pose_distance = pose_distance + max_pose_distance = max(max_pose_distance, pose_distance) prop = (max_pose_distance - pose_distance) / max_pose_distance # ** 2.0 return ( self.max_linear_speed_to_staging_configuration * prop diff --git a/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py b/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py index 3000b9ce..b1a44a43 100644 --- a/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_configuration_with_ft_thresholds_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveToConfigurationWithFTThresholdsTree behavior tree and provides functions to wrap that behavior tree in a ROS2 action server. diff --git a/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py index 8ac0beac..50bd68ba 100644 --- a/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveToConfigurationWithWheelchairWallTree behaviour tree. This tree was designed for the MoveToStagingConfiguration action, but can be diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index 1b681942..d075298b 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveToMouthTree behaviour tree and provides functions to wrap that behaviour tree in a ROS2 action server. @@ -240,8 +242,7 @@ def speed(post_stamped: PoseStamped) -> Tuple[float, float]: + post_stamped.pose.position.y**2.0 + post_stamped.pose.position.z**2.0 ) ** 0.5 - if pose_distance > max_pose_distance: - max_pose_distance = pose_distance + max_pose_distance = max(max_pose_distance, pose_distance) prop = (max_pose_distance - pose_distance) / max_pose_distance # ** 0.5 return ( self.max_linear_speed * (1.0 - prop) diff --git a/ada_feeding/ada_feeding/trees/move_to_pose_tree.py b/ada_feeding/ada_feeding/trees/move_to_pose_tree.py index a9a39ba9..8911b725 100644 --- a/ada_feeding/ada_feeding/trees/move_to_pose_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_pose_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveToPoseTree behavior tree and provides functions to wrap that behavior tree in a ROS2 action server. 
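For reviewers skimming the two `speed()` hunks above: both callbacks compute a linear ramp against the farthest pose distance observed so far. A minimal sketch of that ramp follows; the standalone function name and signature are ours, and only the `max(...)` refactor and the `prop` formula come from the hunks themselves.

```python
# Sketch of the distance-proportional speed ramp refactored in the two
# speed() hunks above; names are illustrative, not the class attributes.

def ramped_speed(pose_distance: float, max_pose_distance: float, max_speed: float) -> float:
    # The refactor collapses an if-statement into this single max(...) call.
    max_pose_distance = max(max_pose_distance, pose_distance)
    prop = (max_pose_distance - pose_distance) / max_pose_distance
    # MoveFromMouthTree scales by prop; MoveToMouthTree uses (1.0 - prop).
    return max_speed * prop


assert abs(ramped_speed(0.5, 1.0, 0.1) - 0.05) < 1e-9  # halfway -> half speed
```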
diff --git a/ada_feeding/ada_feeding/trees/move_to_tree.py b/ada_feeding/ada_feeding/trees/move_to_tree.py index 338005fa..7d26a602 100644 --- a/ada_feeding/ada_feeding/trees/move_to_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the abstract MoveTo behavior tree and provides functions to wrap that behavior tree in a ROS2 action server. @@ -91,7 +93,7 @@ def get_result( # If the tree succeeded, return success if tree.root.status == py_trees.common.Status.SUCCESS: result_msg.status = result_msg.STATUS_SUCCESS - # If the tree failed, detemine whether it was a planning or motion failure + # If the tree failed, determine whether it was a planning or motion failure elif tree.root.status == py_trees.common.Status.FAILURE: # Get Feedback Visitor to investigate failure cause feedback_visitor = None diff --git a/ada_feeding/ada_feeding/trees/start_servo_tree.py b/ada_feeding/ada_feeding/trees/start_servo_tree.py index 8053ea7b..e321c2a3 100644 --- a/ada_feeding/ada_feeding/trees/start_servo_tree.py +++ b/ada_feeding/ada_feeding/trees/start_servo_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the StartServoTree behavior tree, which turns the servo controller on and starts MoveIt Servo. diff --git a/ada_feeding/ada_feeding/trees/stop_servo_tree.py b/ada_feeding/ada_feeding/trees/stop_servo_tree.py index 96d01b2e..a551652c 100644 --- a/ada_feeding/ada_feeding/trees/stop_servo_tree.py +++ b/ada_feeding/ada_feeding/trees/stop_servo_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the StopServoTree behavior tree, which stops MoveIt Servo. """ diff --git a/ada_feeding/ada_feeding/trees/trigger_tree.py b/ada_feeding/ada_feeding/trees/trigger_tree.py index 70c1abdb..a2ee7543 100644 --- a/ada_feeding/ada_feeding/trees/trigger_tree.py +++ b/ada_feeding/ada_feeding/trees/trigger_tree.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the abstract TriggerTree class, which is a behavior tree that implements the `Trigger.action` interface defined in `ada_feeding_msgs`. @@ -20,7 +22,7 @@ class TriggerTree(ActionServerBT, ABC): """ - An abstract behvaior tree for any behavior that should get triggered and send + An abstract behaviour tree for any behavior that should get triggered and send feedback and results according to the `Trigger.action` interface defined in `ada_feeding_msgs`. """ diff --git a/ada_feeding/ada_feeding/visitors/__init__.py b/ada_feeding/ada_feeding/visitors/__init__.py index 0c9cb721..a39d5da3 100644 --- a/ada_feeding/ada_feeding/visitors/__init__.py +++ b/ada_feeding/ada_feeding/visitors/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains custom BT Visitors that are used in the Ada Feeding project. All of these extend VisitorBase. 
diff --git a/ada_feeding/ada_feeding/visitors/debug_visitor.py b/ada_feeding/ada_feeding/visitors/debug_visitor.py index cb98be52..b777d4fd 100644 --- a/ada_feeding/ada_feeding/visitors/debug_visitor.py +++ b/ada_feeding/ada_feeding/visitors/debug_visitor.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This visitor logs the name and status of the behavior it is on. """ diff --git a/ada_feeding/ada_feeding/visitors/moveto_visitor.py b/ada_feeding/ada_feeding/visitors/moveto_visitor.py index 9305e05a..11dab49d 100644 --- a/ada_feeding/ada_feeding/visitors/moveto_visitor.py +++ b/ada_feeding/ada_feeding/visitors/moveto_visitor.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the MoveIt2 Visitor. diff --git a/ada_feeding/ada_feeding/watchdog/__init__.py b/ada_feeding/ada_feeding/watchdog/__init__.py index 5e315085..38e8aed1 100644 --- a/ada_feeding/ada_feeding/watchdog/__init__.py +++ b/ada_feeding/ada_feeding/watchdog/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains the conditions that are checked by the watchdog. """ diff --git a/ada_feeding/ada_feeding/watchdog/estop_condition.py b/ada_feeding/ada_feeding/watchdog/estop_condition.py index 037241a0..1516ac4d 100644 --- a/ada_feeding/ada_feeding/watchdog/estop_condition.py +++ b/ada_feeding/ada_feeding/watchdog/estop_condition.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains EStopCondition, a watchdog condition that listens for the e-stop button either being clicked or being unplugged and fails if so. @@ -415,7 +417,7 @@ def __configure_system_audio(self) -> None: for the e-stop button. TODO: Although not crucial, we should consider storing the original - system volume, and then restoring it when thos watchdog condition is + system volume, and then restoring it when this watchdog condition is terminated. """ # pylint: disable=too-many-branches, too-many-statements, too-many-locals @@ -579,7 +581,7 @@ def __init_stream(self) -> None: ( f"Error opening audio device with index {device_i}. " f"{EStopCondition.PYAUDIO_STREAM_TROUBLESHOOTING}\n\n" - f"Excpetion: {exc}" + f"Exception: {exc}" ), throttle_duration_sec=1, ) @@ -610,7 +612,7 @@ def __udev_listener(self, action: str, device: pyudev.Device) -> None: self._node.get_logger().info("E-Stop button unplugged") with self.is_mic_unplugged_lock: self.is_mic_unplugged = True - # Although there is also an "add" action, that doesn't have the devce ID. + # Although there is also an "add" action, that doesn't have the device ID. elif action == "change": self._node.get_logger().info("E-Stop button plugged in") with self.is_mic_unplugged_lock: @@ -818,7 +820,7 @@ def check_startup(self) -> List[Tuple[bool, str, str]]: status of a startup condition, a string name describing the condition, and a string detailing the status of the condition. All conditions must be True for the startup condition to be considered passed. 
- For example, [(False, "Recieved Topic X Data", "Has not received at + For example, [(False, "Received Topic X Data", "Has not received at least one message on topic X")] means that the startup condition has not passed because the node has not received any messages on topic X yet. """ diff --git a/ada_feeding/ada_feeding/watchdog/ft_sensor_condition.py b/ada_feeding/ada_feeding/watchdog/ft_sensor_condition.py index 15322b15..c5193cc0 100644 --- a/ada_feeding/ada_feeding/watchdog/ft_sensor_condition.py +++ b/ada_feeding/ada_feeding/watchdog/ft_sensor_condition.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains FTSensorCondition, a watchdog condition that accumulates all force-torque sensor readings and checks that the sensor is still publishing and @@ -164,7 +166,7 @@ def check_startup(self) -> List[Tuple[bool, str, str]]: status of a startup condition, a string name describing the condition, and a string detailing the status of the condition. All conditions must be True for the startup condition to be considered passed. - For example, [(False, "Recieved Topic X Data", "Has not received at + For example, [(False, "Received Topic X Data", "Has not received at least one message on topic X")] means that the startup condition has not passed because the node has not received any messages on topic X yet. """ diff --git a/ada_feeding/ada_feeding/watchdog/watchdog_condition.py b/ada_feeding/ada_feeding/watchdog/watchdog_condition.py index 87c849ac..b1863b99 100644 --- a/ada_feeding/ada_feeding/watchdog/watchdog_condition.py +++ b/ada_feeding/ada_feeding/watchdog/watchdog_condition.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains WatchdogCondition, an abstract class that every watchdog condition must inherit from. @@ -37,7 +39,7 @@ def check_startup(self) -> List[Tuple[bool, str, str]]: status of a startup condition, a string name describing the condition, and a string detailing the status of the condition. All conditions must be True for the startup condition to be considered passed. - For example, [(False, "Recieved Topic X Data", "Has not received at + For example, [(False, "Received Topic X Data", "Has not received at least one message on topic X")] means that the startup condition has not passed because the node has not received any messages on topic X yet. """ diff --git a/ada_feeding/config/ada_feeding_action_servers_custom.yaml b/ada_feeding/config/ada_feeding_action_servers_custom.yaml index f1062c61..fb5636ae 100644 --- a/ada_feeding/config/ada_feeding_action_servers_custom.yaml +++ b/ada_feeding/config/ada_feeding_action_servers_custom.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # This file is auto-generated by create_action_servers.py ada_feeding_action_servers: ros__parameters: diff --git a/ada_feeding/config/ada_feeding_action_servers_default.yaml b/ada_feeding/config/ada_feeding_action_servers_default.yaml index cfc01871..70fd7daf 100644 --- a/ada_feeding/config/ada_feeding_action_servers_default.yaml +++ b/ada_feeding/config/ada_feeding_action_servers_default.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. 
See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. ada_feeding_action_servers: ros__parameters: diff --git a/ada_feeding/config/ada_watchdog.yaml b/ada_feeding/config/ada_watchdog.yaml index e576a8d1..183a8c3e 100644 --- a/ada_feeding/config/ada_watchdog.yaml +++ b/ada_feeding/config/ada_watchdog.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. ada_watchdog: ros__parameters: @@ -12,7 +15,7 @@ ada_watchdog: ############################################################################ # If the force_torque sensor has not published a message in this amount of time, - # or any of its dimensions are zero-variance in this amount of time, the watchdog + # or any of its dimensions are zero-variance in this amount of time, the watchdog # will trigger a fault. ft_timeout_sec: 1.0 @@ -20,7 +23,7 @@ ada_watchdog: # Parameters for the EStopCondition of the watchdog ############################################################################ - # The rate at which to sample fromt eh microphone (the e-stop button) + # The rate at which to sample from the microphone (the e-stop button) rate: 48000 # The number of samples to take at a time. If you are getting overflow # errors, try increasing this number. @@ -39,12 +42,12 @@ ada_watchdog: # If there are multiple button clicks within this time range, it is considered a # single click. time_per_click_sec: 0.5 - + # The below parameters are used to set the system microphone volume. It is # important for system microphone volume to be consistent, because the e-stop # button's signal amplitude is based on that. Since each device has different # controls, we specify device-specific configurations. - # + # # Instructions to get the amixer parameters are here: # 1. `pactl_mic_name`: # - Unplug the microphone. diff --git a/ada_feeding/launch/ada_feeding_launch.xml b/ada_feeding/launch/ada_feeding_launch.xml index 6ddc64df..7c2bf870 100644 --- a/ada_feeding/launch/ada_feeding_launch.xml +++ b/ada_feeding/launch/ada_feeding_launch.xml @@ -1,3 +1,8 @@ + + diff --git a/ada_feeding/scripts/ada_watchdog.py b/ada_feeding/scripts/ada_watchdog.py index 00e0b424..21b70bad 100755 --- a/ada_feeding/scripts/ada_watchdog.py +++ b/ada_feeding/scripts/ada_watchdog.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a node, ADAWatchdog, which does the following: 1. Monitors the state of the force-torque sensor, to ensure it is still diff --git a/ada_feeding/scripts/create_action_servers.py b/ada_feeding/scripts/create_action_servers.py index 97bb5564..a0a98413 100755 --- a/ada_feeding/scripts/create_action_servers.py +++ b/ada_feeding/scripts/create_action_servers.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # pylint: disable=too-many-lines """ This module contains a node, CreateActionServers, for creating action servers @@ -947,7 +950,7 @@ def setup_tree(self, tree: py_trees.trees.BehaviourTree) -> None: Runs the initial setup on a behavior tree after creating it. 
Specifically, this function: (1) sets every behavior's logger - to be the node's logger; and (2) calls teh tree's `setup` function. + to be the node's logger; and (2) calls the tree's `setup` function. Parameters ---------- @@ -1006,7 +1009,7 @@ async def execute_callback(goal_handle: ServerGoalHandle) -> Awaitable: (if not already loaded) and executes the behavior tree, publishing periodic feedback. """ - # pylint: disable=too-many-statements + # pylint: disable=too-many-statements, too-many-nested-blocks, too-many-branches # This is the main execution loop. goal_uuid = "".join(format(x, "02x") for x in goal_handle.goal_id.uuid) diff --git a/ada_feeding/scripts/dummy_ft_sensor.py b/ada_feeding/scripts/dummy_ft_sensor.py index a9d8de93..15f33108 100755 --- a/ada_feeding/scripts/dummy_ft_sensor.py +++ b/ada_feeding/scripts/dummy_ft_sensor.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a node, DummyForceTorqueSensor, which publishes sample data to mimic the force-torque sensor on the robot. By setting parameters, users can @@ -11,7 +14,7 @@ - Run the node: `ros2 run ada_feeding dummy_ft_sensor` - Subscribe to the sensor data: `ros2 topic echo /wireless_ft/ftSensor3` - Turn the sensor off: `ros2 param set /dummy_ft_sensor is_on False` -- Start publishing zero-variance data: +- Start publishing zero-variance data: `ros2 param set /dummy_ft_sensor std [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]` - Start publishing data where one dimension is zero-variance: `ros2 param set /dummy_ft_sensor std [0.0, 0.1, 0.1, 0.1, 0.1, 0.1]` diff --git a/ada_feeding/scripts/joint_state_latency.py b/ada_feeding/scripts/joint_state_latency.py index 8af3ad76..bbee2953 100755 --- a/ada_feeding/scripts/joint_state_latency.py +++ b/ada_feeding/scripts/joint_state_latency.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines a barebones ROS2 node that subscribes to the /joint_states topic and logs the latency between when the message was published and when it was diff --git a/ada_feeding/scripts/robot_state_service.py b/ada_feeding/scripts/robot_state_service.py index bdadb4fb..d39b7879 100755 --- a/ada_feeding/scripts/robot_state_service.py +++ b/ada_feeding/scripts/robot_state_service.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ A node that subscribes to the joint states, keeps track of the most up-to-date state for each joint, and exposes a service of type GetRobotState.srv that diff --git a/ada_feeding/scripts/save_image.py b/ada_feeding/scripts/save_image.py index 4ec35896..5d1d3a8e 100755 --- a/ada_feeding/scripts/save_image.py +++ b/ada_feeding/scripts/save_image.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a node that exposes a ROS service that saves the latest RGB image and depth image from the RealSense, at the parameter-specified filepath. 
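The dummy F/T pattern documented above is compact enough to sketch. This is a hedged approximation, not the script's actual code: the topic name and the `std` parameter come from the usage notes, while the rate, default values, and the omitted `is_on` handling are assumptions.

```python
# Hedged sketch of a dummy F/T publisher in the spirit of dummy_ft_sensor.py.
import numpy as np
import rclpy
from geometry_msgs.msg import WrenchStamped
from rclpy.node import Node


class DummyFTSensor(Node):
    def __init__(self) -> None:
        super().__init__("dummy_ft_sensor")
        # Per-dimension std; set a dimension to 0.0 to mimic zero-variance data.
        self.declare_parameter("std", [0.1] * 6)
        self.pub = self.create_publisher(WrenchStamped, "/wireless_ft/ftSensor3", 1)
        self.create_timer(0.01, self.publish_sample)  # assumed ~100 Hz

    def publish_sample(self) -> None:
        std = self.get_parameter("std").value
        fx, fy, fz, tx, ty, tz = (float(x) for x in np.random.normal(0.0, std))
        msg = WrenchStamped()
        msg.header.stamp = self.get_clock().now().to_msg()
        msg.wrench.force.x, msg.wrench.force.y, msg.wrench.force.z = fx, fy, fz
        msg.wrench.torque.x, msg.wrench.torque.y, msg.wrench.torque.z = tx, ty, tz
        self.pub.publish(msg)


if __name__ == "__main__":
    rclpy.init()
    rclpy.spin(DummyFTSensor())
```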
diff --git a/ada_feeding/tests/__init__.py b/ada_feeding/tests/__init__.py index e69de29b..94cfe0b8 100644 --- a/ada_feeding/tests/__init__.py +++ b/ada_feeding/tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. diff --git a/ada_feeding/tests/helpers.py b/ada_feeding/tests/helpers.py index b7545f2e..e76db836 100644 --- a/ada_feeding/tests/helpers.py +++ b/ada_feeding/tests/helpers.py @@ -1,4 +1,6 @@ -#!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines unit tests for the eventually_swiss idiom. """ @@ -178,7 +180,7 @@ def check_termination_new_statuses( descriptor: str = "", ) -> None: """ - Checkes that `terminate` either has not been called on the behavior, or + Checks that `terminate` either has not been called on the behavior, or that it has been called with the correct new status. Parameters diff --git a/ada_feeding/tests/test_eventually_swiss.py b/ada_feeding/tests/test_eventually_swiss.py old mode 100644 new mode 100755 index b5a5e8c0..cd0a2faa --- a/ada_feeding/tests/test_eventually_swiss.py +++ b/ada_feeding/tests/test_eventually_swiss.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines unit tests for the eventually_swiss idiom. """ diff --git a/ada_feeding/tests/test_scoped_behavior.py b/ada_feeding/tests/test_scoped_behavior.py old mode 100644 new mode 100755 index 131629a1..e174c884 --- a/ada_feeding/tests/test_scoped_behavior.py +++ b/ada_feeding/tests/test_scoped_behavior.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines unit tests for the scoped_behaviour idiom. """ @@ -13,6 +16,7 @@ # Local imports from ada_feeding.idioms import scoped_behavior + from .helpers import ( TickCounterWithTerminateTimestamp, check_count_status, diff --git a/ada_feeding_action_select/ada_feeding_action_select/__init__.py b/ada_feeding_action_select/ada_feeding_action_select/__init__.py index e69de29b..94cfe0b8 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/__init__.py +++ b/ada_feeding_action_select/ada_feeding_action_select/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/__init__.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/__init__.py index 9028f987..1e5f1c79 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/__init__.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains code that adapts incoming sensor info to context and posthoc context. 
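As orientation for the adapter files below: each adapter maps incoming sensor data to a fixed-size context (or posthoc) vector that the policies consume. A minimal sketch of that pattern; the real base class lives in `base_adapters.py`, so treat the method and property names here as guesses, and `MeanColorAdapter` as purely hypothetical.

```python
# Hedged sketch of the context-adapter pattern; not the base_adapters.py API.
from abc import ABC, abstractmethod

import numpy as np
import numpy.typing as npt


class ContextAdapterSketch(ABC):
    """Maps incoming sensor data to a fixed-size context vector."""

    @property
    @abstractmethod
    def dim(self) -> int:
        """Dimension of the returned context vector."""

    @abstractmethod
    def get_context(self, image: npt.NDArray) -> npt.NDArray:
        """Featurize one (already decoded) image crop into a length-dim vector."""


class MeanColorAdapter(ContextAdapterSketch):
    """Hypothetical adapter: context = mean RGB over the masked crop."""

    @property
    def dim(self) -> int:
        return 3

    def get_context(self, image: npt.NDArray) -> npt.NDArray:
        # The real adapters first decode ada_feeding_msgs/Mask, e.g. via
        # ros_msg_to_cv2_image as the imports in this diff suggest.
        return image.reshape(-1, 3).mean(axis=0)


print(MeanColorAdapter().get_context(np.zeros((4, 4, 3))))  # -> [0. 0. 0.]
```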
diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/base_adapters.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/base_adapters.py index f1e881bc..6233a6e6 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/base_adapters.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/base_adapters.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines an abstract class (and defaults) for generating context and posthoc vectors from input data. diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/color_adapter.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/color_adapter.py index fd64d4d4..998f3e5a 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/color_adapter.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/color_adapter.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the SPANet context adapter. @@ -13,9 +15,9 @@ from overrides import override # Local imports -from ada_feeding_action_select.helpers import logger from ada_feeding_msgs.msg import Mask from ada_feeding_perception.helpers import ros_msg_to_cv2_image +from ada_feeding_action_select.helpers import logger from .base_adapters import ContextAdapter diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/hapticnet_adapter.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/hapticnet_adapter.py index 0ecf292b..507cb7c8 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/hapticnet_adapter.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/hapticnet_adapter.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the HapticNet context adapter. diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/__init__.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/__init__.py index 9fe2a857..8f960a65 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/__init__.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This package contains code that works with the SPANet featurizer. """ diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/hapticnet.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/hapticnet.py index ddae7788..b3a0623f 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/hapticnet.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/hapticnet.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
+ """ This module defines the HapticNet Mostly copied from: diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/spanet.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/spanet.py index 6fc801aa..2f8920bf 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/models/spanet.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/models/spanet.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the SPANet featurizer. Mostly copied from: diff --git a/ada_feeding_action_select/ada_feeding_action_select/adapters/spanet_adapter.py b/ada_feeding_action_select/ada_feeding_action_select/adapters/spanet_adapter.py index 5c52bb5d..2f8400a8 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/adapters/spanet_adapter.py +++ b/ada_feeding_action_select/ada_feeding_action_select/adapters/spanet_adapter.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the SPANet context adapter. @@ -16,9 +18,9 @@ import torch # Local imports -from ada_feeding_action_select.helpers import logger from ada_feeding_msgs.msg import Mask from ada_feeding_perception.helpers import ros_msg_to_cv2_image +from ada_feeding_action_select.helpers import logger from .models import SPANetConfig, SPANet from .base_adapters import ContextAdapter diff --git a/ada_feeding_action_select/ada_feeding_action_select/helpers.py b/ada_feeding_action_select/ada_feeding_action_select/helpers.py index b4b85930..e18c94ed 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/helpers.py +++ b/ada_feeding_action_select/ada_feeding_action_select/helpers.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines a number of helper functions that are reused throughout ada_feeding_action_select. diff --git a/ada_feeding_action_select/ada_feeding_action_select/policies/__init__.py b/ada_feeding_action_select/ada_feeding_action_select/policies/__init__.py index 892f278a..9301a44e 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/policies/__init__.py +++ b/ada_feeding_action_select/ada_feeding_action_select/policies/__init__.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains policies for selecting actions based on context (visual and posthoc). diff --git a/ada_feeding_action_select/ada_feeding_action_select/policies/base_policies.py b/ada_feeding_action_select/ada_feeding_action_select/policies/base_policies.py index 338c1484..0af7236a 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/policies/base_policies.py +++ b/ada_feeding_action_select/ada_feeding_action_select/policies/base_policies.py @@ -1,7 +1,9 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ -This module defines an abstract class (and defaults) for +This module defines an abstract class (and defaults) for selecting an action based on a policy. 
""" diff --git a/ada_feeding_action_select/ada_feeding_action_select/policies/color_policy.py b/ada_feeding_action_select/ada_feeding_action_select/policies/color_policy.py index 91ba6e24..a37b1935 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/policies/color_policy.py +++ b/ada_feeding_action_select/ada_feeding_action_select/policies/color_policy.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines a policy that chooses an action based on the color of the mask. diff --git a/ada_feeding_action_select/ada_feeding_action_select/policies/linear_policies.py b/ada_feeding_action_select/ada_feeding_action_select/policies/linear_policies.py index 2952d5e5..28b819fe 100644 --- a/ada_feeding_action_select/ada_feeding_action_select/policies/linear_policies.py +++ b/ada_feeding_action_select/ada_feeding_action_select/policies/linear_policies.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines concrete linear policy classes. """ @@ -16,8 +18,8 @@ import numpy.typing as npt # Local imports -from ada_feeding_action_select.helpers import get_action_library, logger from ada_feeding_msgs.msg import AcquisitionSchema +from ada_feeding_action_select.helpers import get_action_library, logger from .base_policies import Policy diff --git a/ada_feeding_action_select/ada_feeding_action_select/policy_service.py b/ada_feeding_action_select/ada_feeding_action_select/policy_service.py old mode 100644 new mode 100755 index 2e689724..32353ebd --- a/ada_feeding_action_select/ada_feeding_action_select/policy_service.py +++ b/ada_feeding_action_select/ada_feeding_action_select/policy_service.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines a node that launches a 2 ROS2 services. This service implement AcquisitionSelect and AcquisitionReport. @@ -24,11 +27,11 @@ import torch # Internal imports +from ada_feeding_msgs.srv import AcquisitionSelect, AcquisitionReport from ada_feeding.helpers import import_from_string from ada_feeding_action_select.helpers import register_logger from ada_feeding_action_select.policies import Policy from ada_feeding_action_select.adapters import ContextAdapter, PosthocAdapter -from ada_feeding_msgs.srv import AcquisitionSelect, AcquisitionReport class PolicyServices(Node): @@ -163,7 +166,7 @@ def _declare_parameters(self): def _init_checkpoints_record(self, context_cls: type, posthoc_cls: type) -> None: """ - Seperate logic for checkpoint and data record + Separate logic for checkpoint and data record """ # pylint: disable=too-many-branches @@ -486,7 +489,7 @@ def report_callback_work( # TODO: Consider making get_kwargs an ada_feeding helper def get_kwargs(self, kws_root: str, kwarg_root: str) -> Dict: """ - Pull variable keyward arguments from ROS2 parameter server. + Pull variable keyword arguments from ROS2 parameter server. Needed because RCL does not allow dictionary params. 
Parameters diff --git a/ada_feeding_action_select/config/acquisition_library.yaml b/ada_feeding_action_select/config/acquisition_library.yaml index 456e4f62..b1c50dd0 100644 --- a/ada_feeding_action_select/config/acquisition_library.yaml +++ b/ada_feeding_action_select/config/acquisition_library.yaml @@ -1,4 +1,7 @@ -# This file was modifed by modify_ac_library.py +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +# This file was modified by modify_ac_library.py actions: - ext_angular: - 0.0 diff --git a/ada_feeding_action_select/config/acquisition_library.yaml.orig b/ada_feeding_action_select/config/acquisition_library.yaml.orig index 0fef73bf..fdf9e1c3 100644 --- a/ada_feeding_action_select/config/acquisition_library.yaml.orig +++ b/ada_feeding_action_select/config/acquisition_library.yaml.orig @@ -101,7 +101,7 @@ actions: ext_duration: 0.3900365829467773 ext_force: 50.0 ext_torque: 4.0 - - ## 3 (Clean VS0 w/ Angle Aproach) + - ## 3 (Clean VS0 w/ Angle Approach) pre_pos: [-0.042610413948224124,0.08381169451702956,0.06859996598659442] pre_quat: [0.0549787, 0.9943871, -0.0646521, 0.0631807] pre_offset: [0.0, 0.0, -0.03] diff --git a/ada_feeding_action_select/config/color_map.yaml b/ada_feeding_action_select/config/color_map.yaml index 7566aa34..e04b7ba7 100644 --- a/ada_feeding_action_select/config/color_map.yaml +++ b/ada_feeding_action_select/config/color_map.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Map of RGB colors to Action index color_map: - # Default diff --git a/ada_feeding_action_select/config/full_acquisition_library.yaml b/ada_feeding_action_select/config/full_acquisition_library.yaml index 695545d2..719d3f60 100644 --- a/ada_feeding_action_select/config/full_acquisition_library.yaml +++ b/ada_feeding_action_select/config/full_acquisition_library.yaml @@ -1,4 +1,7 @@ -# This file was modifed by modify_ac_library.py +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + +# This file was modified by modify_ac_library.py actions: - ext_angular: - -0.4 diff --git a/ada_feeding_action_select/config/full_acquisition_library.yaml.orig b/ada_feeding_action_select/config/full_acquisition_library.yaml.orig index 34b0495d..02e6c059 100644 --- a/ada_feeding_action_select/config/full_acquisition_library.yaml.orig +++ b/ada_feeding_action_select/config/full_acquisition_library.yaml.orig @@ -169,7 +169,7 @@ actions: ext_duration: 0.7900252342224122 ext_force: 50.0 ext_torque: 4.0 - - ## 7 (Clean VS0 w/ Angle Aproach) + - ## 7 (Clean VS0 w/ Angle Approach) pre_pos: [-0.042610413948224124,0.08381169451702956,0.06859996598659442] pre_quat: [0.0549787, 0.9943871, -0.0646521, 0.0631807] pre_offset: [0.0, 0.0, -0.03] diff --git a/ada_feeding_action_select/config/policies.yaml b/ada_feeding_action_select/config/policies.yaml index 059b9eb1..1596a365 100644 --- a/ada_feeding_action_select/config/policies.yaml +++ b/ada_feeding_action_select/config/policies.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
+ ## Cross-Policy Variables # RCL *NEEDS* to add aliases # See: https://github.com/personalrobotics/ada_feeding/issues/79 @@ -27,7 +30,7 @@ ada_feeding_action_select: #posthoc_class: ada_feeding_action_select.adapters.HapticNetPosthoc #posthoc_kws: # - checkpoint # Path relative to share data directory - # - n_features + # - n_features # - gpu_index #posthoc_kwargs: # checkpoint: checkpoint/adapter/hapticnet_ckpt.pth @@ -77,7 +80,7 @@ ada_feeding_action_select: random: policy_class: ada_feeding_action_select.policies.RandomLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -90,7 +93,7 @@ ada_feeding_action_select: random_noposthoc: policy_class: ada_feeding_action_select.policies.RandomLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -103,7 +106,7 @@ ada_feeding_action_select: greedy: policy_class: ada_feeding_action_select.policies.GreedyLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -116,7 +119,7 @@ ada_feeding_action_select: greedy_noposthoc: policy_class: ada_feeding_action_select.policies.GreedyLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -129,7 +132,7 @@ ada_feeding_action_select: egreedy: policy_class: ada_feeding_action_select.policies.EpsilonGreedyLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -144,7 +147,7 @@ ada_feeding_action_select: egreedy_noposthoc: policy_class: ada_feeding_action_select.policies.EpsilonGreedyLinearPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -159,7 +162,7 @@ ada_feeding_action_select: linucb: policy_class: ada_feeding_action_select.policies.LinUCBPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp @@ -174,7 +177,7 @@ ada_feeding_action_select: linucb_noposthoc: policy_class: ada_feeding_action_select.policies.LinUCBPolicy - kws: + kws: - library - lambda_l2 - lambda_posthoc_damp diff --git a/ada_feeding_action_select/launch/ada_feeding_action_select_launch.xml b/ada_feeding_action_select/launch/ada_feeding_action_select_launch.xml index c99f3abf..b838c05c 100644 --- a/ada_feeding_action_select/launch/ada_feeding_action_select_launch.xml +++ b/ada_feeding_action_select/launch/ada_feeding_action_select_launch.xml @@ -1,3 +1,8 @@ + + diff --git a/ada_feeding_action_select/scripts/modify_ac_library.py b/ada_feeding_action_select/scripts/modify_ac_library.py index e312c0e0..75555d44 100755 --- a/ada_feeding_action_select/scripts/modify_ac_library.py +++ b/ada_feeding_action_select/scripts/modify_ac_library.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ Test script for modifying the acquisition library """ @@ -82,7 +85,7 @@ def main(in_fname: str, out_fname: str): print(f"Writing to: {out_fname}") data["actions"] = actions with open(out_fname, "w", encoding="utf-8") as file: - file.write("# This file was modifed by modify_ac_library.py\n") + file.write("# This file was modified by modify_ac_library.py\n") yaml.dump(data, file) diff --git a/ada_feeding_action_select/setup.py b/ada_feeding_action_select/setup.py index a8f2bd3d..6b40abed 100644 --- a/ada_feeding_action_select/setup.py +++ b/ada_feeding_action_select/setup.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
+ from setuptools import find_packages, setup import os from glob import glob diff --git a/ada_feeding_action_select/test/test_copyright.py b/ada_feeding_action_select/test/test_copyright.py index ceffe896..b15eb480 100644 --- a/ada_feeding_action_select/test/test_copyright.py +++ b/ada_feeding_action_select/test/test_copyright.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_feeding_action_select/test/test_pep257.py b/ada_feeding_action_select/test/test_pep257.py index a2c3deb8..ff1f907d 100644 --- a/ada_feeding_action_select/test/test_pep257.py +++ b/ada_feeding_action_select/test/test_pep257.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_feeding_msgs/action/ActivateController.action b/ada_feeding_msgs/action/ActivateController.action index 6a2b76b6..4ca19b64 100644 --- a/ada_feeding_msgs/action/ActivateController.action +++ b/ada_feeding_msgs/action/ActivateController.action @@ -1,7 +1,7 @@ # This action type "inherits" from Trigger in that it has all the fields as # Trigger, and more. -# What controller to activate. The action will deactive all other controllers. +# What controller to activate. The action will deactivate all other controllers. string controller_to_activate # Whether to also re-tare the force-torque sensor diff --git a/ada_feeding_msgs/msg/Mask.msg b/ada_feeding_msgs/msg/Mask.msg index b1e7679d..9379d818 100644 --- a/ada_feeding_msgs/msg/Mask.msg +++ b/ada_feeding_msgs/msg/Mask.msg @@ -1,4 +1,4 @@ -# This message is used to specify a single output of an image segmentation +# This message is used to specify a single output of an image segmentation # algorithm. # The axis-aligned bounding box that contains the segmented item @@ -19,6 +19,6 @@ float64 average_depth # An arbitrary ID that defines the segmented item string item_id -# A score that indicates how confident the segemntation algorithm is in +# A score that indicates how confident the segmentation algorithm is in # this mask. float64 confidence diff --git a/ada_feeding_msgs/srv/ModifyCollisionObject.srv b/ada_feeding_msgs/srv/ModifyCollisionObject.srv index 4e754398..201491f6 100644 --- a/ada_feeding_msgs/srv/ModifyCollisionObject.srv +++ b/ada_feeding_msgs/srv/ModifyCollisionObject.srv @@ -9,7 +9,7 @@ byte operation string object_id --- -# Whether or not it was succesfully added +# Whether or not it was successfully added bool success # Additional information diff --git a/ada_feeding_perception/README.md b/ada_feeding_perception/README.md index 91b8da1c..c9d236af 100644 --- a/ada_feeding_perception/README.md +++ b/ada_feeding_perception/README.md @@ -1,4 +1,5 @@ # Ada Feeding Perception + This code performs image segmentation using the [Segment Anything](https://github.com/facebookresearch/segment-anything). It takes an input image and a point as input and generates masks for the regions of interest in the image. ## Setup @@ -13,18 +14,18 @@ For running SegmentAnything test scripts (below), be sure to unzip `test/food_im 2. Source your workspace: `source install/setup.bash` 3.
Run the perception nodes: `ros2 launch ada_feeding_perception ada_feeding_perception.launch.py` 4. Launch the motion nodes: - 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_real_sense:=false run_face_detection:=false run_food_detection:=false` - 2. Real nodes: `ros2 launch ada_feeding ada_feeding_launch.xml` + 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_real_sense:=false run_face_detection:=false run_food_detection:=false` + 2. Real nodes: `ros2 launch ada_feeding ada_feeding_launch.xml` 5. Launch the RealSense node: - 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_motion:=false run_face_detection:=false run_food_detection:=false` - 1. NOTE: SegmentFromPoint will no longer work with only the dummy RealSense node, since the dummy RealSense only publishes a color image, whereas SegmentFromPoint also expects a depth image. - 2. Real nodes: - 1. SSH into the `nano` user of `nano`: On a pre-configured lab computer, this should be `ssh nano`. Else, look here for [the IP address of nano](https://github.com/personalrobotics/pr_docs/wiki/Networking-and-SSH-Information). - 2. On `nano`: - 1. `ros2config` (this takes several seconds) - 2. `ros2 launch realsense2_camera rs_launch.py rgb_camera.profile:='640,480,30' depth_module.profile:='640,480,30' align_depth.enable:='true' initial_reset:='true'`. See here for [a complete list of params](https://github.com/IntelRealSense/realsense-ros/blob/ros2-development/realsense2_camera/launch/rs_launch.py). **Note that `nano` must be connected to the same WiFi network as the computer running any ros2 nodes that subscribe to it.** - 3. (To visualize the camera stream, run `ros2 run rviz2 rviz2`) - 4. **NOTE**: For the compressed depth stream, we found that `png_level` of 1 works best, to get compression while not slowing down FPS too much. Due to a quirk of our eye-in-hand setup, we hardcode this parameter directly in the compressed depth transport; thus, the code does not automatically set that parameter. Be sure to set the parameter yourself! + 1. Dummy nodes: `ros2 launch feeding_web_app_ros2_test feeding_web_app_dummy_nodes_launch.xml run_motion:=false run_face_detection:=false run_food_detection:=false` + 1. NOTE: SegmentFromPoint will no longer work with only the dummy RealSense node, since the dummy RealSense only publishes a color image, whereas SegmentFromPoint also expects a depth image. + 2. Real nodes: + 1. SSH into the `nano` user of `nano`: On a pre-configured lab computer, this should be `ssh nano`. Else, look here for [the IP address of nano](https://github.com/personalrobotics/pr_docs/wiki/Networking-and-SSH-Information). + 2. On `nano`: + 1. `ros2config` (this takes several seconds) + 2. `ros2 launch realsense2_camera rs_launch.py rgb_camera.profile:='640,480,30' depth_module.profile:='640,480,30' align_depth.enable:='true' initial_reset:='true'`. See here for [a complete list of params](https://github.com/IntelRealSense/realsense-ros/blob/ros2-development/realsense2_camera/launch/rs_launch.py). **Note that `nano` must be connected to the same WiFi network as the computer running any ros2 nodes that subscribe to it.** + 3. (To visualize the camera stream, run `ros2 run rviz2 rviz2`) + 4. **NOTE**: For the compressed depth stream, we found that `png_level` of 1 works best, to get compression while not slowing down FPS too much. 
Due to a quirk of our eye-in-hand setup, we hardcode this parameter directly in the compressed depth transport; thus, the code does not automatically set that parameter. Be sure to set the parameter yourself! 6. Launch the web app ([instructions here](https://github.com/personalrobotics/feeding_web_interface/tree/main/feedingwebapp)) ## Food Segmentation @@ -34,6 +35,7 @@ Food segmentation currently uses SegmentAnything's base model (`vit_b`) and has ### Benchmarking Food Segmentation Speed Below are rough numbers that were taken by running the food segmentation action server and accessing the web app on the same machine, starting the timer when we click a point on the web app, and ending the timer when the masks render in the web app. In other words, it includes all delays but web latency. + - On LoveLace's GPU, it takes ~0.6s. - On LoveLace's CPU, it takes ~4s. - On an Ubuntu VM (4 core, 8GB of RAM), it takes ~10s. @@ -47,6 +49,7 @@ There are several options for testing food segmentation: A `test_sam.py` convenience script is provided to run images through SegmentAnything without needing ROS. This script can be useful to ensure that SegmentAnything is working on your computer, or to benchmark how long SegmentAnything takes without additional delays. To run this script, do: + - `cd ada_feeding_perception` - `python3 test_sam.py --input_image ../test/food_img/sete_00100.jpg --input_point [800,500]` @@ -79,11 +82,13 @@ Launch the web app along with all the other nodes (real or dummy) as documented ## Parameters ### `segment_from_point` + - `model_name` (string, required): the name of the SegmentAnything checkpoint to use. You can see the options [here](https://github.com/facebookresearch/segment-anything#model-checkpoints). - `model_base_url` (string, required): The base URL where the model is hosted. Again, this came from the links [here](https://github.com/facebookresearch/segment-anything#model-checkpoints). - `n_contender_masks` (int, optional, default 3): The number of contender masks to return. ### `test_segment_from_point` + - `mode` (string, required): Either `offline` or `online`. In `offline` it segments a list of images stored on file and points that are passed in as parameters. In `online`, it displays a live stream and lets users click a point to specify what point to seed the segmentation with. - `action_server_name` (string, required): The name of the action server to call. - `image_topic` (string, required): The name of the image topic that the action server subscribes to. @@ -99,28 +104,31 @@ Our eye-in-hand Food-on-Fork Detection node and training/testing infrastructure 1. **Developing a new food-on-fork detector**: Create a subclass of `FoodOnForkDetector` that implements all of the abstractmethods. Note that as of now, a model does not have access to a real-time TF Buffer during test time; hence, **all transforms that the model relies on must be static**. 2. **Gather the dataset**: Because this node uses the eye-in-hand camera, it is sensitive to the relative pose between the camera and the fork. If you are using PRL's robot, [the dataset collected in early 2024](https://drive.google.com/drive/folders/1hNciBOmuHKd67Pw6oAvj_iN_rY1M8ZV0?usp=drive_link) may be sufficient. Otherwise, you should collect your own dataset: - 1. The dataset should consist of a series of ROS2 bags, each recording the following: (a) the aligned depth to color image topic; (b) the color image topic; (c) the camera info topic (we assume it is the same for both); and (d) the TF topic(s). - 2. 
We recorded three types of bags: (a) bags where the robot was going through the motions of feeding without food on the fork and without the fork nearing a person or plate; (b) the same as above but with food on the fork; and (c) bags where the robot was acquiring and feeding a bite to someone. We used the first two types of bags for training, and the third type of bag for evaluation. - 3. All ROS2 bags should be in the same directory, with a file `bags_metadata.csv` at the top-level of that directory. - 4. `bags_metadata.csv` contains the following columns: `rosbag_name` (str), `time_from_start` (float), `food_on_fork` (0/1), `arm_moving` (0/1). The file only needs rows for timestamps when one or both of the latter columns change; for intermediate timestamps, it is assumed that they stay the same. - 5. To generate `bags_metadata.csv`, we recommend launching RVIZ, adding your depth and/or RGB image topic, and playing back the bag. e.g., - 1. `ros2 run rviz2 rviz2 --ros-args -p use_sim_time:=true` - 2. `ros2 bag play 2024_03_01_two_bites_3 --clock` - 3. Pause and play the rosbag script when food foes on/off the fork, and when the arm starts/stops moving, and populate `bags_metadata.csv` accordingly (elapsed time since bag start should be visible at the bottom of RVIZ2). + 1. The dataset should consist of a series of ROS2 bags, each recording the following: (a) the aligned depth to color image topic; (b) the color image topic; (c) the camera info topic (we assume it is the same for both); and (d) the TF topic(s). + 2. We recorded three types of bags: (a) bags where the robot was going through the motions of feeding without food on the fork and without the fork nearing a person or plate; (b) the same as above but with food on the fork; and (c) bags where the robot was acquiring and feeding a bite to someone. We used the first two types of bags for training, and the third type of bag for evaluation. + 3. All ROS2 bags should be in the same directory, with a file `bags_metadata.csv` at the top-level of that directory. + 4. `bags_metadata.csv` contains the following columns: `rosbag_name` (str), `time_from_start` (float), `food_on_fork` (0/1), `arm_moving` (0/1). The file only needs rows for timestamps when one or both of the latter columns change; for intermediate timestamps, it is assumed that they stay the same (a hypothetical example is sketched below). + 5. To generate `bags_metadata.csv`, we recommend launching RVIZ, adding your depth and/or RGB image topic, and playing back the bag. e.g., + 1. `ros2 run rviz2 rviz2 --ros-args -p use_sim_time:=true` + 2. `ros2 bag play 2024_03_01_two_bites_3 --clock` + 3. Pause and play the rosbag script when food goes on/off the fork, and when the arm starts/stops moving, and populate `bags_metadata.csv` accordingly (elapsed time since bag start should be visible at the bottom of RVIZ2). 3. **Train/test the model on offline data**: We provide a flexible Python script, `food_on_fork_train_test.py`, to train, test, and/or compare one-or-more food-on-fork models. To use it, first ensure you have built and sourced your workspace, and you are in the directory that contains the script (e.g., `cd ~/colcon_ws/src/ada_feeding/ada_feeding_perception/ada_feeding_perception`). To enable flexible use, the script has **many** command-line arguments; we recommend you read their descriptions with `python3 food_on_fork_train_test.py -h`.
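To make the `bags_metadata.csv` format from step 2 concrete, here is a small, purely hypothetical example (the bag name is taken from the playback command above, but the timestamps and labels are invented). Note that rows appear only when `food_on_fork` or `arm_moving` changes:

```
rosbag_name,time_from_start,food_on_fork,arm_moving
2024_03_01_two_bites_3,0.0,0,0
2024_03_01_two_bites_3,2.5,0,1
2024_03_01_two_bites_3,14.0,1,1
2024_03_01_two_bites_3,31.5,0,1
```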
For reference, we include the command we used to train our model below: - ``` - python3 food_on_fork_train_test.py --model-classes '{"distance_no_fof_detector_with_filters": "ada_feeding_perception.food_on_fork_detectors.FoodOnForkDistanceToNoFOFDetector"}' --model-kwargs '{"distance_no_fof_detector_with_filters": {"camera_matrix": [614.5933227539062, 0.0, 312.1358947753906, 0.0, 614.6914672851562, 223.70831298828125, 0.0, 0.0, 1.0], "min_distance": 0.001}}' --lower-thresh 0.25 --upper-thresh 0.75 --train-set-size 0.5 --crop-top-left 344 272 --crop-bottom-right 408 336 --depth-min-mm 310 --depth-max-mm 340 --rosbags-select 2024_03_01_no_fof 2024_03_01_no_fof_1 2024_03_01_no_fof_2 2024_03_01_no_fof_3 2024_03_01_no_fof_4 2024_03_01_fof_cantaloupe_1 2024_03_01_fof_cantaloupe_2 2024_03_01_fof_cantaloupe_3 2024_03_01_fof_strawberry_1 2024_03_01_fof_strawberry_2 2024_03_01_fof_strawberry_3 2024_02_29_no_fof 2024_02_29_fof_cantaloupe 2024_02_29_fof_strawberry --seed 42 --temporal-window-size 5 --spatial-num-pixels 10 - ``` + ``` + python3 food_on_fork_train_test.py --model-classes '{"distance_no_fof_detector_with_filters": "ada_feeding_perception.food_on_fork_detectors.FoodOnForkDistanceToNoFOFDetector"}' --model-kwargs '{"distance_no_fof_detector_with_filters": {"camera_matrix": [614.5933227539062, 0.0, 312.1358947753906, 0.0, 614.6914672851562, 223.70831298828125, 0.0, 0.0, 1.0], "min_distance": 0.001}}' --lower-thresh 0.25 --upper-thresh 0.75 --train-set-size 0.5 --crop-top-left 344 272 --crop-bottom-right 408 336 --depth-min-mm 310 --depth-max-mm 340 --rosbags-select 2024_03_01_no_fof 2024_03_01_no_fof_1 2024_03_01_no_fof_2 2024_03_01_no_fof_3 2024_03_01_no_fof_4 2024_03_01_fof_cantaloupe_1 2024_03_01_fof_cantaloupe_2 2024_03_01_fof_cantaloupe_3 2024_03_01_fof_strawberry_1 2024_03_01_fof_strawberry_2 2024_03_01_fof_strawberry_3 2024_02_29_no_fof 2024_02_29_fof_cantaloupe 2024_02_29_fof_strawberry --seed 42 --temporal-window-size 5 --spatial-num-pixels 10 + ``` + Note that we trained our model on data where the fork either had or didn't have food the whole time, and didn't go near any objects (e.g., the plate or the user's mouth). (Also, note that not all the above ROS2 bags are necessary; we've trained accurate detectors with half of them.) We then did an offline evaluation of the model on bags of actual feeding data: - ``` - python3 food_on_fork_train_test.py --model-classes '{"distance_no_fof_detector_with_filters": "ada_feeding_perception.food_on_fork_detectors.FoodOnForkDistanceToNoFOFDetector"}' --model-kwargs '{"distance_no_fof_detector_with_filters": {"camera_matrix": [614.5933227539062, 0.0, 312.1358947753906, 0.0, 614.6914672851562, 223.70831298828125, 0.0, 0.0, 1.0], "min_distance": 0.001}}' --lower-thresh 0.25 --upper-thresh 0.75 --train-set-size 0.5 --crop-top-left 308 248 --crop-bottom-right 436 332 --depth-min-mm 310 --depth-max-mm 340 --rosbags-select 2024_03_01_two_bites 2024_03_01_two_bites_2 2024_03_01_two_bites_3 2024_02_29_two_bites --seed 42 --temporal-window-size 5 --spatial-num-pixels 10 --no-train - ``` -4. **Test the model on online data**: First, copy the parameters you used when training your model, as well as the filename of the saved model, to `config/food_on_fork_detection.yaml`. Re-build and source your workspace. - 1. **Live Robot**: - 1. Launch the robot as usual; the `ada_feeding_perception`launchfile will launch food-on-fork detection. - 2.
Toggle food-on-fork detection on: `ros2 service call /toggle_food_on_fork_detection std_srvs/srv/SetBool "{data: true}"` - 3. Echo the output of food-on-fork detection: `ros2 topic echo /food_on_fork_detection` - 2. **ROS2 bag data**: - 1. Launch perception: `ros2 launch ada_feeding_perception ada_feeding_perception.launch.py` - 2. Toggle food-on-fork detection on and echo the output of food-on-fork detection, as documented above. - 4. Launch RVIZ and play back a ROS2 bag, as documented above. + +``` +python3 food_on_fork_train_test.py --model-classes '{"distance_no_fof_detector_with_filters": "ada_feeding_perception.food_on_fork_detectors.FoodOnForkDistanceToNoFOFDetector"}' --model-kwargs '{"distance_no_fof_detector_with_filters": {"camera_matrix": [614.5933227539062, 0.0, 312.1358947753906, 0.0, 614.6914672851562, 223.70831298828125, 0.0, 0.0, 1.0], "min_distance": 0.001}}' --lower-thresh 0.25 --upper-thresh 0.75 --train-set-size 0.5 --crop-top-left 308 248 --crop-bottom-right 436 332 --depth-min-mm 310 --depth-max-mm 340 --rosbags-select 2024_03_01_two_bites 2024_03_01_two_bites_2 2024_03_01_two_bites_3 2024_02_29_two_bites --seed 42 --temporal-window-size 5 --spatial-num-pixels 10 --no-train +``` + +4. **Test the model on online data**: First, copy the parameters you used when training your model, as well as the filename of the saved model, to `config/food_on_fork_detection.yaml`. Re-build and source your workspace. + 1. **Live Robot**: + 1. Launch the robot as usual; the `ada_feeding_perception` launchfile will launch food-on-fork detection. + 2. Toggle food-on-fork detection on: `ros2 service call /toggle_food_on_fork_detection std_srvs/srv/SetBool "{data: true}"` + 3. Echo the output of food-on-fork detection: `ros2 topic echo /food_on_fork_detection` + 2. **ROS2 bag data**: + 1. Launch perception: `ros2 launch ada_feeding_perception ada_feeding_perception.launch.py` + 2. Toggle food-on-fork detection on and echo the output of food-on-fork detection, as documented above. + 3. Launch RVIZ and play back a ROS2 bag, as documented above. diff --git a/ada_feeding_perception/ada_feeding_perception/__init__.py b/ada_feeding_perception/ada_feeding_perception/__init__.py index e69de29b..94cfe0b8 100644 --- a/ada_feeding_perception/ada_feeding_perception/__init__.py +++ b/ada_feeding_perception/ada_feeding_perception/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. diff --git a/ada_feeding_perception/ada_feeding_perception/ada_feeding_perception_node.py b/ada_feeding_perception/ada_feeding_perception/ada_feeding_perception_node.py index f891de5f..ca15b2b5 100755 --- a/ada_feeding_perception/ada_feeding_perception/ada_feeding_perception_node.py +++ b/ada_feeding_perception/ada_feeding_perception/ada_feeding_perception_node.py @@ -1,3 +1,7 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains the ADAFeedingPerceptionNode class, which is used as a component of all perception nodes in the ADA Feeding project. Specifically, by storing all @@ -22,6 +26,9 @@ # Local imports +# pylint: disable=duplicate-code +# Many perception nodes have similar subscribers/publishers. + class ADAFeedingPerceptionNode(Node): """ @@ -167,7 +174,8 @@ def main(args=None): Launch the ROS node and spin.
""" # Import the necessary modules - # pylint: disable=import-outside-toplevel + # pylint: disable=import-outside-toplevel, cyclic-import + # We don't need to worry about the cyclic import because this is inside main(). from ada_feeding_perception.face_detection import FaceDetectionNode from ada_feeding_perception.food_on_fork_detection import FoodOnForkDetectionNode from ada_feeding_perception.segment_from_point import SegmentFromPointNode diff --git a/ada_feeding_perception/ada_feeding_perception/depth_post_processors.py b/ada_feeding_perception/ada_feeding_perception/depth_post_processors.py index a11a1eee..2fef12c7 100644 --- a/ada_feeding_perception/ada_feeding_perception/depth_post_processors.py +++ b/ada_feeding_perception/ada_feeding_perception/depth_post_processors.py @@ -1,4 +1,6 @@ -#!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a series of post-processors for depth images. """ diff --git a/ada_feeding_perception/ada_feeding_perception/face_detection.py b/ada_feeding_perception/ada_feeding_perception/face_detection.py index 6083cab5..897b0133 100755 --- a/ada_feeding_perception/ada_feeding_perception/face_detection.py +++ b/ada_feeding_perception/ada_feeding_perception/face_detection.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file defines the FaceDetection class, which publishes the 3d PointStamped locations of the largest detected mouth with respect to camera_depth_optical_frame. @@ -27,7 +30,6 @@ from skspatial.objects import Plane, Points from std_srvs.srv import SetBool - # Local imports from ada_feeding_msgs.msg import FaceDetection from ada_feeding_perception.helpers import ( @@ -38,6 +40,9 @@ ) from ada_feeding_perception.ada_feeding_perception_node import ADAFeedingPerceptionNode +# pylint: disable=duplicate-code +# Many perception nodes have similar subscribers/publishers. + class DepthComputationMethod(Enum): """ @@ -138,7 +143,7 @@ def __init__( FaceDetection, "~/face_detection", 1 ) # Currently, RVIZ2 doesn't support visualization of CompressedImage - # (other than as part of a Camera). Hence, for vizualization purposes + # (other than as part of a Camera). Hence, for visualization purposes # this must be an Image. https://github.com/ros2/rviz/issues/738 self.publisher_image = self._node.create_publisher( CompressedImage, "~/face_detection_img/compressed", 1 diff --git a/ada_feeding_perception/ada_feeding_perception/food_on_fork_detection.py b/ada_feeding_perception/ada_feeding_perception/food_on_fork_detection.py index 4d27facf..27c4d2b4 100644 --- a/ada_feeding_perception/ada_feeding_perception/food_on_fork_detection.py +++ b/ada_feeding_perception/ada_feeding_perception/food_on_fork_detection.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a ROS2 node that: (a) takes in parameters specifying a FoodOnFork class to use and kwargs for the class's constructor; (b) exposes a ROS2 service to @@ -42,6 +45,9 @@ class to use and kwargs for the class's constructor; (b) exposes a ROS2 service post_processor_chain, ) +# pylint: disable=duplicate-code +# Many perception nodes have similar subscribers/publishers. 
+ class FoodOnForkDetectionNode: """ diff --git a/ada_feeding_perception/ada_feeding_perception/food_on_fork_detectors.py b/ada_feeding_perception/ada_feeding_perception/food_on_fork_detectors.py index 03115a48..477f983a 100644 --- a/ada_feeding_perception/ada_feeding_perception/food_on_fork_detectors.py +++ b/ada_feeding_perception/ada_feeding_perception/food_on_fork_detectors.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file contains an abstract class, FoodOnForkDetector, that takes in a single depth image and returns a confidence in [0,1] that there is food on the fork. @@ -182,7 +185,7 @@ def get_transforms(frames: List[Tuple[str, str]], tf_buffer: Buffer) -> npt.NDAr Returns ------- - transforms: The transforms (homogenous coordinates) that are necessary + transforms: The transforms (homogeneous coordinates) that are necessary for this classifier. Size (num_transforms, 4, 4). Note that if the transform is not found, it will be a zero matrix. """ @@ -238,7 +241,7 @@ def fit( X: The depth images to train on. Size (num_images, height, width). y: The labels for the depth images. Size (num_images,). Must be one of the values enumerated in FoodOnForkLabel. - t: The transforms (homogenous coordinates) that are necessary for this + t: The transforms (homogeneous coordinates) that are necessary for this classifier. Size (num_images, num_transforms, 4, 4). Should be outputted by `get_transforms`. viz_save_dir: The directory to save visualizations to. If None, no @@ -285,7 +288,7 @@ def predict_proba( Parameters ---------- X: The depth images to predict on. - t: The transforms (homogenous coordinates) that are necessary for this + t: The transforms (homogeneous coordinates) that are necessary for this classifier. Size (num_images, num_transforms, 4, 4). Should be outputted by `get_transforms`. @@ -311,7 +314,7 @@ def predict( Parameters ---------- X: The depth images to predict on. - t: The transforms (homogenous coordinates) that are necessary for this + t: The transforms (homogeneous coordinates) that are necessary for this classifier. Size (num_images, num_transforms, 4, 4). Should be outputted by `get_transforms`. lower_thresh: The lower threshold for food on the fork. @@ -355,12 +358,12 @@ def overlay_debug_info(self, img: npt.NDArray, t: npt.NDArray) -> npt.NDArray: Parameters ---------- img: The depth image to overlay debug information onto. - t: The closest transforms (homogenous coordinates) to this image's timestamp. + t: The closest transforms (homogeneous coordinates) to this image's timestamp. Size (num_transforms, 4, 4). Should be outputted by `get_transforms`. Returns ------- - img_with_debug_info: The depth image with debug information overlayed. + img_with_debug_info: The depth image with debug information overlaid. """ # pylint: disable=unused-argument return img @@ -376,7 +379,7 @@ def visualize_img(self, img: npt.NDArray, t: npt.NDArray) -> None: Parameters ---------- img: The depth image to visualize. - t: The closest transforms (homogenous coordinates) to this image's timestamp. + t: The closest transforms (homogeneous coordinates) to this image's timestamp. Size (num_transforms, 4, 4). Should be outputted by `get_transforms`. """ # pylint: disable=unused-argument @@ -894,12 +897,12 @@ def overlay_debug_info(self, img: npt.NDArray, t: npt.NDArray) -> npt.NDArray: # This is done to make it clear what the camera matrix values are. 
# First, convert all no_fof_points back to the camera frame by applying - # the inverse of the homogenous transform t[0, :, :] - no_fof_points_homogenous = np.hstack( + # the inverse of the homogeneous transform t[0, :, :] + no_fof_points_homogeneous = np.hstack( [self.no_fof_points, np.ones((self.no_fof_points.shape[0], 1))] ) no_fof_points_camera = np.dot( - np.linalg.inv(t[0, :, :]), no_fof_points_homogenous.T + np.linalg.inv(t[0, :, :]), no_fof_points_homogeneous.T ).T[:, :3] # For every point in the no_fof_points, convert them back into (u,v) pixel diff --git a/ada_feeding_perception/ada_feeding_perception/food_on_fork_train_test.py b/ada_feeding_perception/ada_feeding_perception/food_on_fork_train_test.py index 84c9caa2..9ffaa742 100644 --- a/ada_feeding_perception/ada_feeding_perception/food_on_fork_train_test.py +++ b/ada_feeding_perception/ada_feeding_perception/food_on_fork_train_test.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This script takes in a variety of command line arguments and then trains and test a FoodOnForkDetector as configured by the arguments. Note that although this is not diff --git a/ada_feeding_perception/ada_feeding_perception/helpers.py b/ada_feeding_perception/ada_feeding_perception/helpers.py index a06133a5..9d0d4139 100644 --- a/ada_feeding_perception/ada_feeding_perception/helpers.py +++ b/ada_feeding_perception/ada_feeding_perception/helpers.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file contains helper functions for the ada_feeding_perception package. """ @@ -21,8 +24,8 @@ try: from rosbags.typesys.types import ( sensor_msgs__msg__CompressedImage as rCompressedImage, - sensor_msgs__msg__Image as rImage, ) + from rosbags.typesys.types import sensor_msgs__msg__Image as rImage except (TypeError, ModuleNotFoundError) as err: rclpy.logging.get_logger("ada_feeding_perception_helpers").warn( "rosbags is not installed, or a wrong version is installed (needs 0.9.19). " @@ -31,7 +34,6 @@ from sensor_msgs.msg import CompressedImage, Image from skimage.morphology import flood_fill - # The fixed header that ROS2 Humble's compressed depth image transport plugin prepends to # the data. The exact value was empirically determined, but the below link shows the code # that prepends additional data: @@ -42,8 +44,10 @@ ) try: + # pylint: disable=invalid-name generic_image_type = Union[Image, rImage, CompressedImage, rCompressedImage] except NameError as _: + # pylint: disable=invalid-name # This only happens if rosbags wasn't imported, which is logged above. generic_image_type = Union[Image, CompressedImage] diff --git a/ada_feeding_perception/ada_feeding_perception/republisher.py b/ada_feeding_perception/ada_feeding_perception/republisher.py old mode 100644 new mode 100755 index 2aa178cb..c9738592 --- a/ada_feeding_perception/ada_feeding_perception/republisher.py +++ b/ada_feeding_perception/ada_feeding_perception/republisher.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the Republisher node, which republishes the messages from one topic to another, with user-configureable post-processors. 
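An aside before the next file: the `FoodOnForkDetector` interface documented in `food_on_fork_detectors.py` above takes depth images `X` and stacks of (static) homogeneous transforms `t` through `fit`, `predict_proba`, and `predict`. The following is a minimal sketch of what a custom detector could look like, with caveats: the base class is stubbed here so the snippet runs standalone, the signatures are only inferred from the docstrings in this diff, the "label 0 = no food" convention is an assumption, and `MeanDepthDetector` is hypothetical rather than part of the repository.

```python
# Hypothetical sketch of a FoodOnForkDetector subclass; the real base class lives in
# ada_feeding_perception.food_on_fork_detectors and is stubbed here so this runs standalone.
from abc import ABC, abstractmethod

import numpy as np


class FoodOnForkDetector(ABC):  # stand-in for the real abstract base class
    @abstractmethod
    def fit(self, X, y, t, viz_save_dir=None): ...

    @abstractmethod
    def predict_proba(self, X, t): ...


class MeanDepthDetector(FoodOnForkDetector):
    """Toy detector: compares each image's mean depth to the mean no-food depth."""

    def fit(self, X, y, t, viz_save_dir=None):
        # Assumes label 0 means "no food on fork"; the real labels come from FoodOnForkLabel.
        self.no_fof_mean_mm = float(X[y == 0].mean())

    def predict_proba(self, X, t):
        # Food in front of the fork lowers the mean depth; map that drop to [0, 1].
        # The transforms t are unused in this toy (the docs note they must be static anyway).
        means = X.reshape(X.shape[0], -1).mean(axis=1)
        return np.clip((self.no_fof_mean_mm - means) / self.no_fof_mean_mm, 0.0, 1.0)

    def predict(self, X, t, lower_thresh=0.25, upper_thresh=0.75):
        # Below lower_thresh -> no food (0); above upper_thresh -> food (1); else unsure (-1).
        proba = self.predict_proba(X, t)
        return np.select([proba < lower_thresh, proba > upper_thresh], [0, 1], default=-1)


# Tiny smoke test: two fake 4x4 depth images (mm), one "empty" (far) and one with food (near).
X = np.array([np.full((4, 4), 330.0), np.full((4, 4), 50.0)])
det = MeanDepthDetector()
det.fit(X, y=np.array([0, 1]), t=None)
print(det.predict(X, t=None))  # -> [0 1]
```

A real detector would additionally implement the serialization and debug/visualization hooks (`overlay_debug_info`, `visualize_img`) shown in the diff above.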
diff --git a/ada_feeding_perception/ada_feeding_perception/segment_from_point.py b/ada_feeding_perception/ada_feeding_perception/segment_from_point.py old mode 100644 new mode 100755 index 235ed01d..6983e745 --- a/ada_feeding_perception/ada_feeding_perception/segment_from_point.py +++ b/ada_feeding_perception/ada_feeding_perception/segment_from_point.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file defines the SegmentFromPointNode class, which launches an action server that takes in a seed point, segments the latest image with that seed @@ -42,6 +45,9 @@ ) from ada_feeding_perception.ada_feeding_perception_node import ADAFeedingPerceptionNode +# pylint: disable=duplicate-code +# Many perception nodes have similar subscribers/publishers. + class SegmentFromPointNode: """ @@ -335,7 +341,7 @@ def initialize_sam(self, model_name: str, model_path: str) -> None: Initialize all attributes needed for food segmentation with SAM. This includes loading the SAM, launching the action - server, and more. Note that we are guarenteed the model exists since + server, and more. Note that we are guaranteed the model exists since it was downloaded in the __init__ function of this class. Parameters @@ -372,7 +378,7 @@ def initialize_efficient_sam(self, model_name: str, model_path: str) -> None: Initialize all attributes needed for food segmentation with EfficientSAM. This includes loading the EfficientSAM model, launching the action - server, and more. Note that we are guarenteed the model exists since + server, and more. Note that we are guaranteed the model exists since it was downloaded in the __init__ function of this class. Parameters diff --git a/ada_feeding_perception/ada_feeding_perception/table_detection.py b/ada_feeding_perception/ada_feeding_perception/table_detection.py index a5789ac6..ddc16d16 100644 --- a/ada_feeding_perception/ada_feeding_perception/table_detection.py +++ b/ada_feeding_perception/ada_feeding_perception/table_detection.py @@ -1,6 +1,9 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file defines the TableDetectionNode class, which publishes the 3D PoseStamped -location of the table with respect to camera_depth_optical_frame. +location of the table with respect to camera_depth_optical_frame. """ # Standard imports @@ -37,6 +40,9 @@ ) from ada_feeding_perception.ada_feeding_perception_node import ADAFeedingPerceptionNode +# pylint: disable=duplicate-code +# Many perception nodes have similar subscribers/publishers. + class TableDetectionNode: """ diff --git a/ada_feeding_perception/ada_feeding_perception/test_efficient_sam.py b/ada_feeding_perception/ada_feeding_perception/test_efficient_sam.py index 511dd93c..ca774590 100644 --- a/ada_feeding_perception/ada_feeding_perception/test_efficient_sam.py +++ b/ada_feeding_perception/ada_feeding_perception/test_efficient_sam.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This script demonstrates how to use the Segment Anything Model (SAM) to segment an object from an image given a point inside the object. 
Unlike the diff --git a/ada_feeding_perception/ada_feeding_perception/test_sam.py b/ada_feeding_perception/ada_feeding_perception/test_sam.py index 5bb4c659..cd0c60c9 100644 --- a/ada_feeding_perception/ada_feeding_perception/test_sam.py +++ b/ada_feeding_perception/ada_feeding_perception/test_sam.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This script demonstrates how to use the Segment Anything Model (SAM) to segment an object from an image given a point inside the object. Unlike the diff --git a/ada_feeding_perception/ada_feeding_perception/test_segment_from_point.py b/ada_feeding_perception/ada_feeding_perception/test_segment_from_point.py old mode 100644 new mode 100755 index 07d0e197..0e0672f1 --- a/ada_feeding_perception/ada_feeding_perception/test_segment_from_point.py +++ b/ada_feeding_perception/ada_feeding_perception/test_segment_from_point.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This file defines the TestSegmentFromPoint class, which is a node that can be used to test the SegmentFromPoint action server. diff --git a/ada_feeding_perception/config/face_detection.yaml b/ada_feeding_perception/config/face_detection.yaml index adcb0417..c9bf0f86 100644 --- a/ada_feeding_perception/config/face_detection.yaml +++ b/ada_feeding_perception/config/face_detection.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. face_detection: ros__parameters: @@ -28,4 +31,4 @@ ada_feeding_perception: # The desired length of the depth image buffer depth_buffer_size: 30 # The rate at which to detect and publish detected faces - face_detection_rate_hz: 3 \ No newline at end of file + face_detection_rate_hz: 3 diff --git a/ada_feeding_perception/config/food_on_fork_detection.yaml b/ada_feeding_perception/config/food_on_fork_detection.yaml index 23d9d1b5..85e3f8ae 100644 --- a/ada_feeding_perception/config/food_on_fork_detection.yaml +++ b/ada_feeding_perception/config/food_on_fork_detection.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. food_on_fork_detection: ros__parameters: @@ -35,7 +38,7 @@ food_on_fork_detection: viz_lower_thresh: 0.25 viz_upper_thresh: 0.75 - # The offset to add to no-food-on-fork points. This is useful to acocunt for scenarios like when + # The offset to add to no-food-on-fork points. This is useful to account for scenarios like when # the fork gets bent or the camera extrinsics changes slightly. # NOTE: The fact that food-on-fork detection is sensitive to such small offsets is, in itself, # an indication that it needs to be improved. @@ -78,7 +81,7 @@ ada_feeding_perception: viz_lower_thresh: 0.25 viz_upper_thresh: 0.75 - # The offset to add to no-food-on-fork points. This is useful to acocunt for scenarios like when + # The offset to add to no-food-on-fork points. This is useful to account for scenarios like when # the fork gets bent or the camera extrinsics changes slightly. 
# NOTE: The fact that food-on-fork detection is sensitive to such small offsets is, in itself, # an indication that it needs to be improved. diff --git a/ada_feeding_perception/config/republisher.yaml b/ada_feeding_perception/config/republisher.yaml index 9a4ff6ac..bc16a0d6 100644 --- a/ada_feeding_perception/config/republisher.yaml +++ b/ada_feeding_perception/config/republisher.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. republisher: ros__parameters: diff --git a/ada_feeding_perception/config/segment_from_point.yaml b/ada_feeding_perception/config/segment_from_point.yaml index 86fd247f..28a27a9c 100644 --- a/ada_feeding_perception/config/segment_from_point.yaml +++ b/ada_feeding_perception/config/segment_from_point.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. segment_from_point: ros__parameters: diff --git a/ada_feeding_perception/config/table_detection.yaml b/ada_feeding_perception/config/table_detection.yaml index 4633c0b8..fb2fb24c 100644 --- a/ada_feeding_perception/config/table_detection.yaml +++ b/ada_feeding_perception/config/table_detection.yaml @@ -1,10 +1,13 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. table_detection: ros__parameters: # The rate at which to publish the pose of the detected table table_detection_rate_hz: 3.0 - # A boolean to determine whether to visualize plate detection + # A boolean to determine whether to visualize plate detection table_detection_viz: True # NOTE: If using the combined perception node, be very careful to ensure no name clashes of parameters! @@ -13,5 +16,5 @@ ada_feeding_perception: # The rate at which to publish the pose of the detected table table_detection_rate_hz: 3.0 - # A boolean to determine whether to visualize plate detection + # A boolean to determine whether to visualize plate detection table_detection_viz: True diff --git a/ada_feeding_perception/config/test_segment_from_point.yaml b/ada_feeding_perception/config/test_segment_from_point.yaml index df503cf4..165a54d6 100644 --- a/ada_feeding_perception/config/test_segment_from_point.yaml +++ b/ada_feeding_perception/config/test_segment_from_point.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. test_segment_from_point: ros__parameters: @@ -6,12 +9,12 @@ test_segment_from_point: # When the user clicks a point on the image, it will call the # SegmentFromPoint action server and display the result(s). # offline: The node will read the images and points specified in the params. - # It will call the SegmentFromPoint action server and save the results. + # It will call the SegmentFromPoint action server and save the results. mode: online # online or offline - + # The name of the action server to call action_server_name: /SegmentFromPoint - + # Note that all directories are relative to the `base_dir` parameter, passed # in by the launchfile. 
offline: # Params for offline mode @@ -68,4 +71,4 @@ test_segment_from_point: - 210 - 750 - 200 - - 800 \ No newline at end of file + - 800 diff --git a/ada_feeding_perception/launch/ada_feeding_perception.launch.py b/ada_feeding_perception/launch/ada_feeding_perception.launch.py index 270a2df0..6fa6fa5e 100755 --- a/ada_feeding_perception/launch/ada_feeding_perception.launch.py +++ b/ada_feeding_perception/launch/ada_feeding_perception.launch.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ Launch file for ada_feeding_perception """ diff --git a/ada_feeding_perception/launch/test_food_segmentation_launch.xml b/ada_feeding_perception/launch/test_food_segmentation_launch.xml index bd5cdb41..2bc82b9d 100644 --- a/ada_feeding_perception/launch/test_food_segmentation_launch.xml +++ b/ada_feeding_perception/launch/test_food_segmentation_launch.xml @@ -1,3 +1,8 @@ + + @@ -7,4 +12,4 @@ - \ No newline at end of file + diff --git a/ada_feeding_perception/setup.py b/ada_feeding_perception/setup.py index 0587dc09..9d6af71f 100644 --- a/ada_feeding_perception/setup.py +++ b/ada_feeding_perception/setup.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + from glob import glob import os from setuptools import setup diff --git a/ada_feeding_perception/test/test_copyright.py b/ada_feeding_perception/test/test_copyright.py index ceffe896..b15eb480 100644 --- a/ada_feeding_perception/test/test_copyright.py +++ b/ada_feeding_perception/test/test_copyright.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_feeding_perception/test/test_flake8.py b/ada_feeding_perception/test/test_flake8.py index ee79f31a..2bc27938 100644 --- a/ada_feeding_perception/test/test_flake8.py +++ b/ada_feeding_perception/test/test_flake8.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2017 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_feeding_perception/test/test_pep257.py b/ada_feeding_perception/test/test_pep257.py index a2c3deb8..ff1f907d 100644 --- a/ada_feeding_perception/test/test_pep257.py +++ b/ada_feeding_perception/test/test_pep257.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_planning_scene/ada_planning_scene/__init__.py b/ada_planning_scene/ada_planning_scene/__init__.py index e69de29b..94cfe0b8 100644 --- a/ada_planning_scene/ada_planning_scene/__init__.py +++ b/ada_planning_scene/ada_planning_scene/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
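The config files above all repeat the note that the top-level YAML key must match the node name. For readers unfamiliar with rclpy parameter files, here is a minimal, hypothetical stub (not part of this PR, and `TableDetectionStub` is an invented name) showing the relationship for `table_detection.yaml`: values under `table_detection.ros__parameters` are applied only if the node is actually named `table_detection`.

```python
# Hypothetical standalone stub (not this repo's table detection node). Run with
#   python3 stub.py --ros-args --params-file table_detection.yaml
# and the defaults declared below are overridden by the YAML, because the node
# name matches the file's top-level key.
import rclpy
from rclpy.node import Node


class TableDetectionStub(Node):
    def __init__(self):
        # Must match the top-level key in table_detection.yaml.
        super().__init__("table_detection")
        rate = self.declare_parameter("table_detection_rate_hz", 3.0)
        viz = self.declare_parameter("table_detection_viz", True)
        self.get_logger().info(f"rate={rate.value} Hz, viz={viz.value}")


def main():
    rclpy.init()
    rclpy.spin(TableDetectionStub())


if __name__ == "__main__":
    main()
```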
diff --git a/ada_planning_scene/ada_planning_scene/ada_planning_scene.py b/ada_planning_scene/ada_planning_scene/ada_planning_scene.py old mode 100644 new mode 100755 index 8a6bd49e..64293bf4 --- a/ada_planning_scene/ada_planning_scene/ada_planning_scene.py +++ b/ada_planning_scene/ada_planning_scene/ada_planning_scene.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains the main node for populating and maintaining ADA's planning scene. """ @@ -149,7 +152,7 @@ def __load_parameters(self): ) self.__base_frame = base_frame.value - # If all the collision objects have not been succesfully added to the + # If all the collision objects have not been successfully added to the # planning scene within this time, stop initialization. initialization_timeout_secs = self.declare_parameter( "initialization_timeout_secs", @@ -158,7 +161,7 @@ def __load_parameters(self): name="initialization_timeout_secs", type=ParameterType.PARAMETER_DOUBLE, description=( - "If all the collision objects have not been succesfully added to the " + "If all the collision objects have not been successfully added to the " "planning scene within this time, stop initialization." ), read_only=True, @@ -329,6 +332,7 @@ def cleanup(): if future.done(): response = future.result() if len(response.values) > 0: + # pylint: disable=attribute-defined-outside-init # If the parameter is set, that is the namespace to use if response.values[0].type == ParameterType.PARAMETER_STRING: self.__namespace_to_use = response.values[0].string_value @@ -388,7 +392,7 @@ def spin(node: Node, executor: MultiThreadedExecutor) -> None: """ try: rclpy.spin(node, executor=executor) - except rclpy._rclpy_pybind11.InvalidHandle: + except rclpy._rclpy_pybind11.InvalidHandle: # pylint: disable=protected-access # There is a known issue in rclpy where it doesn't properly handle destruction of # elements in the executor. # - https://github.com/ros2/rclpy/issues/1355 diff --git a/ada_planning_scene/ada_planning_scene/collision_object_manager.py b/ada_planning_scene/ada_planning_scene/collision_object_manager.py index ce3e34a1..9a4b73e2 100644 --- a/ada_planning_scene/ada_planning_scene/collision_object_manager.py +++ b/ada_planning_scene/ada_planning_scene/collision_object_manager.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the CollisionObjectManager class, which tracks the IDs of the (attached) collision objects in the planning scene and allows users to add collision @@ -224,7 +227,7 @@ def add_collision_objects( ------- True if the collision objects were successfully added, False otherwise. """ - # pylint: disable=too-many-arguments, too-many-branches, too-many-statements + # pylint: disable=too-many-arguments, too-many-branches, too-many-statements,too-many-locals # This is the main bread and butter of adding to the planning scene, # so is expected to be complex. 
self.__node.get_logger().info( @@ -386,7 +389,7 @@ def cleanup(): self.__collision_objects_per_batch.pop(batch_id) self.__attached_collision_objects_per_batch.pop(batch_id) - cleanup() + cleanup() # pylint: disable=duplicate-code return True def move_collision_objects( diff --git a/ada_planning_scene/ada_planning_scene/helpers.py b/ada_planning_scene/ada_planning_scene/helpers.py index b97e3af3..d266bbf8 100644 --- a/ada_planning_scene/ada_planning_scene/helpers.py +++ b/ada_planning_scene/ada_planning_scene/helpers.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains helper functions for populating and maintaining the planning scene. diff --git a/ada_planning_scene/ada_planning_scene/planning_scene_initializer.py b/ada_planning_scene/ada_planning_scene/planning_scene_initializer.py index b1218e8d..c7ea9eb2 100644 --- a/ada_planning_scene/ada_planning_scene/planning_scene_initializer.py +++ b/ada_planning_scene/ada_planning_scene/planning_scene_initializer.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module defines the PlanningSceneInitializer class, which reads the configuration for static mesh and primitive collision objects from the parameters and adds them to @@ -31,7 +34,7 @@ class PlanningSceneInitializer: """ # pylint: disable=too-few-public-methods - # This class only exists to intialize the planning scene, hence it only needs one + # This class only exists to initialize the planning scene, hence it only needs one # public method def __init__( diff --git a/ada_planning_scene/ada_planning_scene/update_from_face_detection.py b/ada_planning_scene/ada_planning_scene/update_from_face_detection.py index 50979fae..738a5257 100644 --- a/ada_planning_scene/ada_planning_scene/update_from_face_detection.py +++ b/ada_planning_scene/ada_planning_scene/update_from_face_detection.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains the UpdateFromFaceDetection class, which subscribes to the output of face detection, moves the head to the detected position, and scales the @@ -28,7 +31,6 @@ from ada_planning_scene.collision_object_manager import CollisionObjectManager from ada_planning_scene.helpers import CollisionObjectParams - # Define a namedtuple to store latest the joint state UpdateFromFaceDetectionParams = namedtuple( "UpdateFromFaceDetectionParams", @@ -208,10 +210,10 @@ def __load_parameters(self) -> None: try: # The object ID of the head in the planning scene head_object_id = self.__node.declare_parameter( - f"{namespace}.head_object_id", + f"face_detection.{namespace}.head_object_id", "head", descriptor=ParameterDescriptor( - name=f"{namespace}.head_object_id", + name=f"face_detection.{namespace}.head_object_id", type=ParameterType.PARAMETER_STRING, description=( "The object ID of the head in the planning scene. 
" @@ -222,16 +224,16 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: head_object_id = self.__node.get_parameter( - f"{namespace}.head_object_id" + f"face_detection.{namespace}.head_object_id" ) try: # The object ID of the body in the planning scene body_object_id = self.__node.declare_parameter( - f"{namespace}.body_object_id", + f"face_detection.{namespace}.body_object_id", "body", descriptor=ParameterDescriptor( - name=f"{namespace}.body_object_id", + name=f"face_detection.{namespace}.body_object_id", type=ParameterType.PARAMETER_STRING, description=( "The object ID of the body in the planning scene. " @@ -242,17 +244,17 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: body_object_id = self.__node.get_parameter( - f"{namespace}.body_object_id" + f"face_detection.{namespace}.body_object_id" ) try: # If the head is farther than this distance from the default position, # use the default position instead head_distance_threshold = self.__node.declare_parameter( - f"{namespace}.head_distance_threshold", + f"face_detection.{namespace}.head_distance_threshold", 0.5, descriptor=ParameterDescriptor( - name=f"{namespace}.head_distance_threshold", + name=f"face_detection.{namespace}.head_distance_threshold", type=ParameterType.PARAMETER_DOUBLE, description=( "Reject any mouth positions that are greater than the distance " @@ -263,16 +265,16 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: head_distance_threshold = self.__node.get_parameter( - f"{namespace}.head_distance_threshold" + f"face_detection.{namespace}.head_distance_threshold" ) try: # The TF frame to use for the mouth mouth_frame_id = self.__node.declare_parameter( - f"{namespace}.mouth_frame_id", + f"face_detection.{namespace}.mouth_frame_id", "mouth", descriptor=ParameterDescriptor( - name=f"{namespace}.mouth_frame_id", + name=f"face_detection.{namespace}.mouth_frame_id", type=ParameterType.PARAMETER_STRING, description=("The name of the frame to use for the mouth."), read_only=True, @@ -280,16 +282,16 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: mouth_frame_id = self.__node.get_parameter( - f"{namespace}.mouth_frame_id" + f"face_detection.{namespace}.mouth_frame_id" ) try: # Whether to move the body as well as the head update_body = self.__node.declare_parameter( - f"{namespace}.update_body", + f"face_detection.{namespace}.update_body", False, descriptor=ParameterDescriptor( - name=f"{namespace}.update_body", + name=f"face_detection.{namespace}.update_body", type=ParameterType.PARAMETER_BOOL, description=( "Whether to update the body as well as the head based on face detection." @@ -298,7 +300,9 @@ def __load_parameters(self) -> None: ), ) except ParameterAlreadyDeclaredException: - update_body = self.__node.get_parameter(f"{namespace}.update_body") + update_body = self.__node.get_parameter( + f"face_detection.{namespace}.update_body" + ) self.__namespace_to_params[namespace] = UpdateFromFaceDetectionParams( head_object_id=head_object_id.value, @@ -322,6 +326,9 @@ def __update_face_detection(self) -> None: fixed quaternion to the mouth center to create a pose. Finally, move the head in the planning scene to that pose. """ + # pylint: disable=too-many-locals + # One over is fine. 
+ with self.__latest_face_detection_lock: if ( self.__latest_face_detection is None @@ -410,7 +417,7 @@ def __update_face_detection(self) -> None: # NOTE: Although in theory, publishing the mouth TF here can allow for visual servoing # to the mouth, in practice we have to make some assumptions about the mouth orientation # (e.g., the mouth is facing the fork). Those assumptions are made in the MoveToMouth tree, - # and would be overrided here if we published the mouth TF. + # and would be overridden here if we published the mouth TF. if self.__publish_mouth_tf: self.__tf_broadcaster.sendTransform( TransformStamped( diff --git a/ada_planning_scene/ada_planning_scene/update_from_table_detection.py b/ada_planning_scene/ada_planning_scene/update_from_table_detection.py index b876a492..b90942f0 100644 --- a/ada_planning_scene/ada_planning_scene/update_from_table_detection.py +++ b/ada_planning_scene/ada_planning_scene/update_from_table_detection.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains the UpdateFromTableDetection class, which updates the pose of the table based on the results of table detection. @@ -158,10 +161,10 @@ def __load_parameters(self) -> None: try: # The object ID of the table in the planning scene table_object_id = self.__node.declare_parameter( - f"{namespace}.table_object_id", + f"table_detection.{namespace}.table_object_id", "table", descriptor=ParameterDescriptor( - name=f"{namespace}.table_object_id", + name=f"table_detection.{namespace}.table_object_id", type=ParameterType.PARAMETER_STRING, description=( "The object ID of the table in the planning scene. " @@ -172,16 +175,16 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: table_object_id = self.__node.get_parameter( - f"{namespace}.table_object_id" + f"table_detection.{namespace}.table_object_id" ) try: # Where the origin of the table is expected to be relative to the detected point table_origin_offset = self.__node.declare_parameter( - f"{namespace}.table_origin_offset", + f"table_detection.{namespace}.table_origin_offset", [-0.20, -0.25, -0.79], # default value descriptor=ParameterDescriptor( - name=f"{namespace}.table_origin_offset", + name=f"table_detection.{namespace}.table_origin_offset", type=ParameterType.PARAMETER_DOUBLE_ARRAY, description=( "(x, y, z) values to add to the detected table pose (e.g., plate " @@ -192,17 +195,17 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: table_origin_offset = self.__node.get_parameter( - f"{namespace}.table_origin_offset" + f"table_detection.{namespace}.table_origin_offset" ) try: # If the detected table has moved more than this relative to the default, # publish the default pose instead. 
table_distance_threshold = self.__node.declare_parameter( - f"{namespace}.table_distance_threshold", + f"table_detection.{namespace}.table_distance_threshold", 0.5, # default value descriptor=ParameterDescriptor( - name=f"{namespace}.table_distance_threshold", + name=f"table_detection.{namespace}.table_distance_threshold", type=ParameterType.PARAMETER_DOUBLE, description=( "The threshold for the distance (m) between " @@ -214,17 +217,17 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: table_distance_threshold = self.__node.get_parameter( - f"{namespace}.table_distance_threshold" + f"table_detection.{namespace}.table_distance_threshold" ) try: # If the detected table has rotated more than this relative to the default, # publish the default pose instead. table_rotation_threshold = self.__node.declare_parameter( - f"{namespace}.table_rotation_threshold", + f"table_detection.{namespace}.table_rotation_threshold", np.pi / 6.0, # default value descriptor=ParameterDescriptor( - name=f"{namespace}.table_rotation_threshold", + name=f"table_detection.{namespace}.table_rotation_threshold", type=ParameterType.PARAMETER_DOUBLE, description=( "The threshold for the angular distance between " @@ -236,15 +239,15 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: table_rotation_threshold = self.__node.get_parameter( - f"{namespace}.table_rotation_threshold" + f"table_detection.{namespace}.table_rotation_threshold" ) try: disable_table_detection = self.__node.declare_parameter( - f"{namespace}.disable_table_detection", + f"table_detection.{namespace}.disable_table_detection", False, # default value descriptor=ParameterDescriptor( - name=f"{namespace}.disable_table_detection", + name=f"table_detection.{namespace}.disable_table_detection", type=ParameterType.PARAMETER_BOOL, description=( "Whether to disable table detection in this namespace." @@ -254,7 +257,7 @@ def __load_parameters(self) -> None: ) except ParameterAlreadyDeclaredException: disable_table_detection = self.__node.get_parameter( - f"{namespace}.disable_table_detection" + f"table_detection.{namespace}.disable_table_detection" ) # Store the parameters diff --git a/ada_planning_scene/ada_planning_scene/workspace_walls.py b/ada_planning_scene/ada_planning_scene/workspace_walls.py index 4a591cac..3d9f0012 100644 --- a/ada_planning_scene/ada_planning_scene/workspace_walls.py +++ b/ada_planning_scene/ada_planning_scene/workspace_walls.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains the class `WorkspaceWalls` for managing the workspace walls in ADA's planning scene. 
@@ -169,7 +172,7 @@ def __load_parameters(self): j_label = "x" if j == 0 else "y" if j == 1 else "z" # Get the offset for the wall in that direction - name = f"{namespace}.workspace_wall_margin_{j_label}_{i_label}" + name = f"workspace_walls.{namespace}.workspace_wall_margin_{j_label}_{i_label}" default = 0.1 margin = self.__node.declare_parameter( name, @@ -187,7 +190,7 @@ def __load_parameters(self): self.__workspace_wall_margins[namespace][i, j] = margin.value # Check whether the wall in that direction is disabled - name = f"{namespace}.disable_workspace_wall_{j_label}_{i_label}" + name = f"workspace_walls.{namespace}.disable_workspace_wall_{j_label}_{i_label}" disable = self.__node.declare_parameter( name, False, # default value @@ -348,7 +351,7 @@ def __load_parameters(self): self.__fixed_joint_values = self.__fixed_joint_values[:min_len] # The name of the articulated joints in the robot's full URDF. The order - # of these must match the order ot joints in the robot configuration parameters. + # of these must match the order of joints in the robot configuration parameters. articulated_joint_names = self.__node.declare_parameter( "articulated_joint_names", [ @@ -368,7 +371,7 @@ def __load_parameters(self): ) self.__articulated_joint_names = articulated_joint_names.value - def __get_homogenous_transform_in_base_frame( + def __get_homogeneous_transform_in_base_frame( self, position: Tuple[float, float, float], quat_xyzw: Tuple[float, float, float, float], @@ -377,7 +380,7 @@ def __get_homogenous_transform_in_base_frame( ) -> Optional[npt.NDArray]: """ Transforms the position and quaternion in frame_id into base_frame. Returns - The resulting pose represented as a homogenous transformation matrix. In other + The resulting pose represented as a homogeneous transformation matrix. In other words, the return value times (0, 0, 0, 1) is the position in the base frame. Parameters @@ -390,7 +393,7 @@ def __get_homogenous_transform_in_base_frame( Returns ------- - The homogenous transformation matrix that takes a point in the object's frame + The homogeneous transformation matrix that takes a point in the object's frame and converts it to the base frame. None if the transform fails. """ # Get the pose as a PoseStamped @@ -419,7 +422,7 @@ def __get_homogenous_transform_in_base_frame( self.__node.get_logger().error(f"Failed to transform the pose: {error}") return None - # Covert the pose to a homogenous transformation matrix + # Convert the pose to a homogeneous transformation matrix pose_matrix = quaternion_matrix( [ pose.pose.orientation.w, @@ -457,7 +460,7 @@ def __get_mesh_bounds( mesh = params.mesh # Get the transformation matrix - transform = self.__get_homogenous_transform_in_base_frame( + transform = self.__get_homogeneous_transform_in_base_frame( position=params.position, quat_xyzw=params.quat_xyzw, frame_id=params.frame_id, @@ -491,7 +494,7 @@ def __get_primitive_bounds( The bounds of the primitive object. 
""" # Get the transformation matrix - transform = self.__get_homogenous_transform_in_base_frame( + transform = self.__get_homogeneous_transform_in_base_frame( position=params.position, quat_xyzw=params.quat_xyzw, frame_id=params.frame_id, @@ -536,8 +539,8 @@ def __get_primitive_bounds( points = np.array(points) # Transform the points - points_homogenous = np.hstack([points, np.ones((points.shape[0], 1))]) - points_transformed = np.dot(transform, points_homogenous.T).T[:, :3] + points_homogeneous = np.hstack([points, np.ones((points.shape[0], 1))]) + points_transformed = np.dot(transform, points_homogeneous.T).T[:, :3] # Get the bounds as a (2, 3) array if params.primitive_type == SolidPrimitive.SPHERE: @@ -744,7 +747,7 @@ def cleanup(): # `yourdfpy` only allows loading URDF from file, so we bypass its default load. self.__robot_model = URDF(robot=URDF._parse_robot(xml_element=xml_root)) - cleanup() + cleanup() # pylint: disable=duplicate-code return True def __get_parameter_prefix( @@ -1139,7 +1142,7 @@ def initialize( self.__compute_object_bounds() # Load the robot's URDF. We do this in `initialize` as opposed to `__init__` - # because the MoveGroup has to be running to get the paramter. + # because the MoveGroup has to be running to get the parameter. if self.__use_robot_model: # Get the robot model (may take <= 10 secs) self.__node.get_logger().info("Loading robot model.") diff --git a/ada_planning_scene/config/ada_planning_scene.yaml b/ada_planning_scene/config/ada_planning_scene.yaml index 2ed17ac8..674cf743 100644 --- a/ada_planning_scene/config/ada_planning_scene.yaml +++ b/ada_planning_scene/config/ada_planning_scene.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # NOTE: You have to change this node name if you change the node name in the launchfile. ada_planning_scene: ros__parameters: @@ -7,7 +10,7 @@ ada_planning_scene: # available. initialization_timeout_secs: 60.0 - # The base frame. Workspace walls and dynamic position updates (e.g., face and + # The base frame. Workspace walls and dynamic position updates (e.g., face and # table) are published in this frame. Note that if using workspace walls, this # frame must match the base link in the full robot's URDF (note: not the move group). base_frame: root @@ -30,7 +33,7 @@ ada_planning_scene: ############################################################################ # Parameters related to the PlanningSceneInitializer class ############################################################################ - + seated: # list of objects to add to the planning scene. Before changing names, # check where else they are used (e.g., in `ada_feeding`). Ideally, the object_ids @@ -75,7 +78,7 @@ ada_planning_scene: # updated as the robot perceives the table. position: [0.08, -0.5, -0.56] quat_xyzw: [0.0, 0.0, 0.0, 1.0] - offsets: [-0.20, -0.25, -0.689] + offsets: [-0.20, -0.25, -0.689] frame_id: root # the frame_id that the pose is relative to # NOTE: If you change this you must also change the hardcoded initial pose in # bite transfer. @@ -139,7 +142,7 @@ ada_planning_scene: # updated as the robot perceives the table. position: [-0.5, -0.08, -0.56] quat_xyzw: [0.0, 0.0, -0.7071068, 0.7071068] - offsets: [-0.20, -0.25, -0.689] + offsets: [-0.20, -0.25, -0.689] frame_id: root # the frame_id that the pose is relative to # NOTE: If you change this you must also change the hardcoded initial pose in # bite transfer. 
@@ -175,7 +178,7 @@ ada_planning_scene:
       quat_xyzw: [0.0, 0.0, 0.0, 1.0]
       frame_id: root # the frame_id that the pose is relative to
       within_workspace_walls: False # whether to add workspace walls around the wheelchair
-      body: 
+      body:
         filename: body_collision_in_bed.stl
         position: [-0.68, 0.4, -1.24] # should match the wheelchair position
         quat_xyzw: [0.0, 0.0, 0.0, 1.0] # should match the wheelchair orientation
@@ -221,7 +224,7 @@ ada_planning_scene:
       quat_xyzw: [0.0, 0.0, 0.0, 1.0]
       frame_id: root # the frame_id that the pose is relative to
       within_workspace_walls: False # whether to add workspace walls around the wheelchair
-      body: 
+      body:
         filename: body_collision_in_bed.stl
         position: [-0.58, 0.25, -1.09] # should match the wheelchair position
         quat_xyzw: [0.0, 0.0, 0.0, 1.0] # should match the wheelchair orientation
@@ -258,21 +261,22 @@ ada_planning_scene:
     # Direction-specific margins for how far the workspace wall should be (default 0.1m)
     # from the nearest contained object in that direction.
     # Also, direction-specific flags to disable the workspace wall in that direction.
-    seated:
-      workspace_wall_margin_y_min: 0.2 # The min y direction needs a larger margin since the robot moves into that for acquisition
-      disable_workspace_wall_y_max: True # Remove the back wall, to be able to see in in RVIZ
-    seated_90:
-      workspace_wall_margin_y_min: 0.2 # The min y direction needs a larger margin since the robot moves into that for acquisition
-      disable_workspace_wall_y_max: True # Remove the back wall, to be able to see in in RVIZ
-    bedside:
-      disable_workspace_wall_y_min: True # Remove the back wall, to be able to see in in RVIZ
-    bedside_90:
-      disable_workspace_wall_y_min: True # Remove the back wall, to be able to see in in RVIZ
-      # disable_workspace_wall_y_max: True
-      # disable_workspace_wall_x_min: True
-      # disable_workspace_wall_x_max: True
-      # disable_workspace_wall_z_min: True
-      # disable_workspace_wall_z_max: True
+    workspace_walls:
+      seated:
+        workspace_wall_margin_y_min: 0.2 # The min y direction needs a larger margin since the robot moves into that for acquisition
+        disable_workspace_wall_y_max: True # Remove the back wall, to be able to see in RVIZ
+      seated_90:
+        workspace_wall_margin_y_min: 0.2 # The min y direction needs a larger margin since the robot moves into that for acquisition
+        disable_workspace_wall_y_max: True # Remove the back wall, to be able to see in RVIZ
+      bedside:
+        disable_workspace_wall_y_min: True # Remove the back wall, to be able to see in RVIZ
+      bedside_90:
+        disable_workspace_wall_y_min: True # Remove the back wall, to be able to see in RVIZ
+        # disable_workspace_wall_y_max: True
+        # disable_workspace_wall_x_min: True
+        # disable_workspace_wall_x_max: True
+        # disable_workspace_wall_z_min: True
+        # disable_workspace_wall_z_max: True

     # Whether the workspace walls should contain the robot's current configuration
     # at time of recomputation.
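One note on the `workspace_walls:` nesting above: it mirrors the code change at the top of this patch, which prefixes each per-namespace parameter name with `workspace_walls.`, keeping the YAML keys and the declared parameter names in sync. Here is a sketch of how such parameters are declared and read, as a hypothetical standalone rclpy node (illustrative only, not part of this patch):

```python
import rclpy
from rclpy.node import Node


class WorkspaceWallParamsDemo(Node):
    """Hypothetical demo node that declares the re-namespaced wall parameters."""

    def __init__(self):
        super().__init__("ada_planning_scene")
        namespace = "seated"  # one of the configured namespaces
        for i_label in ("min", "max"):
            for j_label in ("x", "y", "z"):
                # Margin between the wall and the nearest contained object.
                margin = self.declare_parameter(
                    f"workspace_walls.{namespace}.workspace_wall_margin_{j_label}_{i_label}",
                    0.1,  # default value, in meters
                )
                # Whether the wall in that direction is disabled.
                disable = self.declare_parameter(
                    f"workspace_walls.{namespace}.disable_workspace_wall_{j_label}_{i_label}",
                    False,  # default value
                )
                self.get_logger().info(
                    f"{j_label}_{i_label}: margin={margin.value}, disabled={disable.value}"
                )


def main():
    rclpy.init()
    node = WorkspaceWallParamsDemo()
    node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
```

Run with `--ros-args --params-file ada_planning_scene.yaml` to surface the overrides above; without a params file, the declared defaults (0.1 m margins, all walls enabled) apply.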
@@ -322,47 +326,49 @@ ada_planning_scene:
     ############################################################################
     # Reject any mouth poses that are greater than the distance threshold away from the default head position
-    seated:
-      head_object_id: head
-      head_distance_threshold: 0.5 # m
-      update_body: True
-    seated_90:
-      head_object_id: head
-      head_distance_threshold: 0.5 # m
-      update_body: True
-    bedside:
-      head_object_id: head
-      head_distance_threshold: 0.5 # m
-      update_body: False
-    bedside_90:
-      head_object_id: head
-      head_distance_threshold: 0.5 # m
-      update_body: False
+    face_detection:
+      seated:
+        head_object_id: head
+        head_distance_threshold: 0.5 # m
+        update_body: True
+      seated_90:
+        head_object_id: head
+        head_distance_threshold: 0.5 # m
+        update_body: True
+      bedside:
+        head_object_id: head
+        head_distance_threshold: 0.5 # m
+        update_body: False
+      bedside_90:
+        head_object_id: head
+        head_distance_threshold: 0.5 # m
+        update_body: False

     ############################################################################
     # Parameters related to the UpdateFromTableDetection class
     ############################################################################
-    # - `table_origin_offset`: The offset values (in meters) for the center coordinates ([x, y, z]) 
+    # - `table_origin_offset`: The offset values (in meters) for the center coordinates ([x, y, z])
     # of the table. These values are used to translate the detected position into the table's origin.
     # - `table_quat_dist_thresh`: The threshold on the angular distance (in radians) between the default
     # table quaternion and the latest table quaternion to determine whether to reject or accept the
     # latest table quaternion.
     # - `table_pos_dist_thresh`: The threshold on the linear distance (in m) between the default table position
     # and the latest table position to determine whether to reject or accept the latest table position.
-    seated:
-      table_origin_offset: [-0.20, -0.25, -0.79]
-      table_quat_dist_thresh: 0.349
-      table_pos_dist_thresh: 0.5
-    seated_90:
-      table_origin_offset: [-0.10, -0.25, -0.79]
-      table_quat_dist_thresh: 0.349
-      table_pos_dist_thresh: 0.5
-    bedside:
-      table_origin_offset: [-0.23, 0.71, -1.0067]
-      table_quat_dist_thresh: 0.349
-      table_pos_dist_thresh: 0.5
-    bedside_90:
-      table_origin_offset: [-0.71, -0.23, -1.0067]
-      table_quat_dist_thresh: 0.349
-      table_pos_dist_thresh: 0.5
+    table_detection:
+      seated:
+        table_origin_offset: [-0.20, -0.25, -0.79]
+        table_quat_dist_thresh: 0.349
+        table_pos_dist_thresh: 0.5
+      seated_90:
+        table_origin_offset: [-0.10, -0.25, -0.79]
+        table_quat_dist_thresh: 0.349
+        table_pos_dist_thresh: 0.5
+      bedside:
+        table_origin_offset: [-0.23, 0.71, -1.0067]
+        table_quat_dist_thresh: 0.349
+        table_pos_dist_thresh: 0.5
+      bedside_90:
+        table_origin_offset: [-0.71, -0.23, -1.0067]
+        table_quat_dist_thresh: 0.349
+        table_pos_dist_thresh: 0.5
diff --git a/ada_planning_scene/launch/ada_moveit_launch.xml b/ada_planning_scene/launch/ada_moveit_launch.xml
index fde30ae3..597a459b 100644
--- a/ada_planning_scene/launch/ada_moveit_launch.xml
+++ b/ada_planning_scene/launch/ada_moveit_launch.xml
@@ -1,3 +1,8 @@
+<!--
+Copyright (c) 2024, Personal Robotics Laboratory
+License: BSD 3-Clause. See LICENSE.md file in root directory.
+-->
+
diff --git a/ada_planning_scene/setup.py b/ada_planning_scene/setup.py
index 441822d4..17fa59f4 100644
--- a/ada_planning_scene/setup.py
+++ b/ada_planning_scene/setup.py
@@ -1,3 +1,6 @@
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+ from glob import glob import os from setuptools import setup diff --git a/ada_planning_scene/test/test_copyright.py b/ada_planning_scene/test/test_copyright.py index ceffe896..b15eb480 100644 --- a/ada_planning_scene/test/test_copyright.py +++ b/ada_planning_scene/test/test_copyright.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_planning_scene/test/test_flake8.py b/ada_planning_scene/test/test_flake8.py index ee79f31a..2bc27938 100644 --- a/ada_planning_scene/test/test_flake8.py +++ b/ada_planning_scene/test/test_flake8.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2017 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ada_planning_scene/test/test_pep257.py b/ada_planning_scene/test/test_pep257.py index a2c3deb8..ff1f907d 100644 --- a/ada_planning_scene/test/test_pep257.py +++ b/ada_planning_scene/test/test_pep257.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/nano_bridge/README.md b/nano_bridge/README.md index 469a59bf..397e043a 100644 --- a/nano_bridge/README.md +++ b/nano_bridge/README.md @@ -4,4 +4,4 @@ This package is very particular to our computer setting, intended to account for `sender` and `receiver` are generic nodes that can serialize any number of ROS topics of any type into one ROS topic. However, pickle serialization can take ~0.2s, which can slow down the publisher. Hence, `sender_compressed_image` and `receiver_compressed_image` can combine any number of CompressedImage topics into one, but can only do it for those topics (and it doesn't have the overhead of serialization). -A future TODO is to create a generic way to handle cases where the input types are the same and are different, and to create a unified sender and receiver class. \ No newline at end of file +A future TODO is to create a generic way to handle cases where the input types are the same and are different, and to create a unified sender and receiver class. diff --git a/nano_bridge/config/nano_bridge.yaml b/nano_bridge/config/nano_bridge.yaml index f20d13df..0ba3ff85 100644 --- a/nano_bridge/config/nano_bridge.yaml +++ b/nano_bridge/config/nano_bridge.yaml @@ -1,3 +1,6 @@ +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
+
 sender:
   ros__parameters:
     topic_names:
diff --git a/nano_bridge/launch/receiver.launch.xml b/nano_bridge/launch/receiver.launch.xml
index 49ec1931..e9f4335f 100644
--- a/nano_bridge/launch/receiver.launch.xml
+++ b/nano_bridge/launch/receiver.launch.xml
@@ -1,3 +1,8 @@
+<!--
+Copyright (c) 2024, Personal Robotics Laboratory
+License: BSD 3-Clause. See LICENSE.md file in root directory.
+-->
+
@@ -14,5 +19,5 @@


-
+
diff --git a/nano_bridge/launch/sender.launch.xml b/nano_bridge/launch/sender.launch.xml
index ab3b6d65..7c751d78 100644
--- a/nano_bridge/launch/sender.launch.xml
+++ b/nano_bridge/launch/sender.launch.xml
@@ -1,3 +1,8 @@
+<!--
+Copyright (c) 2024, Personal Robotics Laboratory
+License: BSD 3-Clause. See LICENSE.md file in root directory.
+-->
+
diff --git a/nano_bridge/msg/CompressedImage.msg b/nano_bridge/msg/CompressedImage.msg
index 8e5176fb..4cab47e5 100644
--- a/nano_bridge/msg/CompressedImage.msg
+++ b/nano_bridge/msg/CompressedImage.msg
@@ -2,4 +2,4 @@ string topic

 # The data
-sensor_msgs/CompressedImage data
\ No newline at end of file
+sensor_msgs/CompressedImage data
diff --git a/nano_bridge/nano_bridge/__init__.py b/nano_bridge/nano_bridge/__init__.py
index e69de29b..94cfe0b8 100644
--- a/nano_bridge/nano_bridge/__init__.py
+++ b/nano_bridge/nano_bridge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
diff --git a/nano_bridge/nano_bridge/helpers.py b/nano_bridge/nano_bridge/helpers.py
index 5207bbf2..f3db1a9e 100644
--- a/nano_bridge/nano_bridge/helpers.py
+++ b/nano_bridge/nano_bridge/helpers.py
@@ -1,3 +1,6 @@
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
 """
 Helper functions for the nano_bridge package.
 """
diff --git a/nano_bridge/nano_bridge/receiver.py b/nano_bridge/nano_bridge/receiver.py
index 3b96c2df..f5e8dc67 100755
--- a/nano_bridge/nano_bridge/receiver.py
+++ b/nano_bridge/nano_bridge/receiver.py
@@ -1,4 +1,7 @@
 #!/usr/bin/env python3
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
 """
 This module contains a node, ReceiverNode, which subscribes to a ByteMultiArray
 topic published by SenderNode and republishes the messages to the original topics,
diff --git a/nano_bridge/nano_bridge/receiver_compressed_image.py b/nano_bridge/nano_bridge/receiver_compressed_image.py
index a9fc66bf..546a0b1f 100755
--- a/nano_bridge/nano_bridge/receiver_compressed_image.py
+++ b/nano_bridge/nano_bridge/receiver_compressed_image.py
@@ -1,4 +1,7 @@
 #!/usr/bin/env python3
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
 """
 This module contains a node, ReceiverCompressedImageNode, which subscribes to a
 topic published by SenderCompressedImageNode and republishes the messages to the original topics,
@@ -23,7 +26,10 @@ from rclpy.publisher import Publisher

 # Local imports
-from nano_bridge.msg import CompressedImage as CompressedImageInput
+# pylint: disable=import-error, no-name-in-module
+from nano_bridge.msg import (
+    CompressedImage as CompressedImageInput,
+)


 class ReceiverCompressedImageNode(Node):
diff --git a/nano_bridge/nano_bridge/sender.py b/nano_bridge/nano_bridge/sender.py
index 6f6f2b3f..64e1653e 100755
--- a/nano_bridge/nano_bridge/sender.py
+++ b/nano_bridge/nano_bridge/sender.py
@@ -1,4 +1,7 @@
 #!/usr/bin/env python3
+# Copyright (c) 2024, Personal Robotics Laboratory
+# License: BSD 3-Clause. See LICENSE.md file in root directory.
+
 """
 This module contains a node, SenderNode, which subscribes to topics as specified
 by its parameters and publishes it to a single topic of type ByteMultiArray.
We diff --git a/nano_bridge/nano_bridge/sender_compressed_image.py b/nano_bridge/nano_bridge/sender_compressed_image.py index c918cc4a..06063a91 100755 --- a/nano_bridge/nano_bridge/sender_compressed_image.py +++ b/nano_bridge/nano_bridge/sender_compressed_image.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) 2024, Personal Robotics Laboratory +# License: BSD 3-Clause. See LICENSE.md file in root directory. + """ This module contains a node, SenderCompressedImageNode, which subscribes to topics as specified by its parameters and publishes it to a single topic. @@ -21,7 +24,10 @@ from rclpy.time import Time # Local imports -from nano_bridge.msg import CompressedImage as CompressedImageOutput +# pylint: disable=import-error, no-name-in-module +from nano_bridge.msg import ( + CompressedImage as CompressedImageOutput, +) class SenderCompressedImageNode(Node): diff --git a/requirements.txt b/requirements.txt index ad48ff3a..06cd4360 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ -pyrealsense2 -overrides -sounddevice -scikit-spatial git+https://github.com/facebookresearch/segment-anything.git git+https://github.com/yformer/EfficientSAM.git +overrides +pyrealsense2 +scikit-spatial +sounddevice diff --git a/run_camera.sh b/run_camera.sh index 6092153a..719919ef 100755 --- a/run_camera.sh +++ b/run_camera.sh @@ -4,5 +4,3 @@ source ~/Workspace/camera_ros2/install/setup.bash export ROS_DOMAIN_ID=42 export RMW_IMPLEMENTATION=rmw_cyclonedds_cpp ros2 launch realsense2_camera rs_launch.py rgb_camera.profile:=640,480,15 depth_module.profile:=640,480,15 align_depth.enable:=true initial_reset:=true publish_tf:=false - - diff --git a/run_nano_bridge.sh b/run_nano_bridge.sh index e83c2985..85f91b15 100755 --- a/run_nano_bridge.sh +++ b/run_nano_bridge.sh @@ -4,5 +4,3 @@ source ~/Workspace/camera_ros2/install/setup.bash export ROS_DOMAIN_ID=42 export RMW_IMPLEMENTATION=rmw_cyclonedds_cpp ros2 launch nano_bridge sender.launch.xml - - diff --git a/start.py b/start.py old mode 100644 new mode 100755 index c4184c79..8230f448 --- a/start.py +++ b/start.py @@ -4,8 +4,8 @@ This module starts all the screen sessions to run the ada_feeding demo. """ -import asyncio import argparse +import asyncio import getpass import os import sys diff --git a/start_nano.py b/start_nano.py index aa5d24cc..a9ea7803 100755 --- a/start_nano.py +++ b/start_nano.py @@ -4,8 +4,8 @@ This module starts all the screen sessions to run the ada_feeding demo. """ -import asyncio import argparse +import asyncio import getpass import os import sys
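A closing note on the nano_bridge diffs: the README earlier in this patch explains that `sender` and `receiver` pickle arbitrary messages into a single ByteMultiArray topic at a cost of roughly 0.2 s per message, while the CompressedImage variants sidestep that cost with the typed `nano_bridge/CompressedImage` message shown in the .msg diff. A rough sketch of the pickle-based framing follows; the shape is assumed for illustration, and the actual field layout and node code in `sender.py`/`receiver.py` may differ:

```python
import pickle

from std_msgs.msg import ByteMultiArray, String


def pack(topic_name, msg):
    """Serialize a (topic_name, message) pair into one ByteMultiArray payload,
    so a single topic can carry messages from many source topics."""
    payload = pickle.dumps((topic_name, msg))
    out = ByteMultiArray()
    # ByteMultiArray.data is a sequence of single-byte values in rclpy.
    out.data = [bytes([b]) for b in payload]
    return out


def unpack(wrapped):
    """Recover the original topic name and message on the receiver side."""
    payload = b"".join(wrapped.data)
    topic_name, msg = pickle.loads(payload)
    return topic_name, msg


if __name__ == "__main__":
    wrapped = pack("/chatter", String(data="hello"))
    topic, msg = unpack(wrapped)
    print(topic, msg.data)  # /chatter hello
```

The typed CompressedImage path skips `pickle.dumps`/`pickle.loads` entirely, which is why the README describes it as avoiding the serialization overhead.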