diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5c3f8ed018..8673a90b5e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,7 +4,7 @@ ci:
   autoupdate_schedule: quarterly
 repos:
   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v19.1.1
+    rev: v19.1.6
     hooks:
       - id: clang-format
         types_or: []
@@ -27,7 +27,7 @@ repos:
            doc/doxygen-awesome.*
         )$
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.9
+    rev: v0.8.6
    hooks:
       - id: ruff
       - id: ruff-format
diff --git a/bindings/python/pinocchio/shortcuts.py b/bindings/python/pinocchio/shortcuts.py
index 8d01baec0d..949794a2e6 100644
--- a/bindings/python/pinocchio/shortcuts.py
+++ b/bindings/python/pinocchio/shortcuts.py
@@ -5,7 +5,8 @@
 ## In this file, some shortcuts are provided ##
 
-from typing import Tuple
+# TODO: Remove when 20.04 is not supported
+from __future__ import annotations
 
 from . import WITH_HPP_FCL, WITH_HPP_FCL_BINDINGS
 from . import pinocchio_pywrap_default as pin
@@ -15,7 +16,7 @@
 
 def buildModelsFromUrdf(
     filename, *args, **kwargs
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     """Parse the URDF file given in input and return a Pinocchio Model followed by corresponding GeometryModels of types specified by geometry_types, in the same order as listed.
     Arguments:
         - filename - name of the urdf file to load
@@ -63,7 +64,7 @@ def _buildModelsFromUrdf(
     verbose=False,
     meshLoader=None,
     geometry_types=None,
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     if geometry_types is None:
         geometry_types = [pin.GeometryType.COLLISION, pin.GeometryType.VISUAL]
 
@@ -119,7 +120,7 @@ def createDatas(*models):
 
 def buildModelsFromSdf(
     filename, *args, **kwargs
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     """Parse the Sdf file given in input and return a Pinocchio Model and a list of Constraint Models, followed by corresponding GeometryModels of types specified by geometry_types, in the same order as listed.
     Arguments:
         - filename - name of the urdf file to load
diff --git a/bindings/python/pinocchio/utils.py b/bindings/python/pinocchio/utils.py
index 2401d142f1..79f5c0244e 100644
--- a/bindings/python/pinocchio/utils.py
+++ b/bindings/python/pinocchio/utils.py
@@ -96,17 +96,17 @@ def fromListToVectorOfString(items):
 
 
 __all__ = [
-    "np",
-    "npl",
     "eye",
-    "zero",
-    "rand",
+    "fromListToVectorOfString",
     "isapprox",
+    "matrixToRpy",
     "mprint",
+    "np",
     "npToTTuple",
     "npToTuple",
+    "npl",
+    "rand",
     "rotate",
     "rpyToMatrix",
-    "matrixToRpy",
-    "fromListToVectorOfString",
+    "zero",
 ]
diff --git a/bindings/python/pinocchio/visualize/meshcat_visualizer.py b/bindings/python/pinocchio/visualize/meshcat_visualizer.py
index c41997a767..f99ae91c1a 100644
--- a/bindings/python/pinocchio/visualize/meshcat_visualizer.py
+++ b/bindings/python/pinocchio/visualize/meshcat_visualizer.py
@@ -1,6 +1,9 @@
+# TODO: Remove when 20.04 is not supported
+from __future__ import annotations
+
 import warnings
 from pathlib import Path
-from typing import ClassVar, List
+from typing import ClassVar
 
 import numpy as np
 
@@ -21,9 +24,10 @@
 
 # DaeMeshGeometry
 import xml.etree.ElementTree as Et
-from typing import Any, Dict, Optional, Set, Union
+from typing import Any
 
-MsgType = Dict[str, Union[str, bytes, bool, float, "MsgType"]]
+# TODO: Remove quote when 20.04 is not supported
+MsgType = "dict[str, Union[str, bytes, bool, float, 'MsgType']]"
 
 try:
     import hppfcl
@@ -110,7 +114,7 @@ def lower(self, object_data: Any) -> MsgType:
         }
 
 
 class DaeMeshGeometry(mg.ReferenceSceneElement):
-    def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
+    def __init__(self, dae_path: str, cache: set[str] | None = None) -> None:
         """Load Collada files with texture images.
         Inspired from https://gist.github.com/danzimmerman/a392f8eadcf1166eb5bd80e3922dbdc5
@@ -131,7 +135,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
             self.dae_raw = text_file.read()
 
         # Parse the image resource in Collada file
-        img_resource_paths: List[Path] = []
+        img_resource_paths: list[Path] = []
         img_lib_element = Et.parse(dae_path).find(
             "{http://www.collada.org/2005/11/COLLADASchema}library_images"
         )
@@ -143,7 +147,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
         ]
 
         # Convert textures to data URL for Three.js ColladaLoader to load them
-        self.img_resources: Dict[str, str] = {}
+        self.img_resources: dict[str, str] = {}
         for img_path in img_resource_paths:
             img_key = str(img_path)
             # Return empty string if already in cache
@@ -164,7 +168,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
             img_uri = f"data:image/png;base64,{img_data.decode('utf-8')}"
             self.img_resources[img_key] = img_uri
 
-    def lower(self) -> Dict[str, Any]:
+    def lower(self) -> dict[str, Any]:
         """Pack data into a dictionary of the format that must be passed to
         `Visualizer.window.send`.
""" @@ -1112,10 +1116,10 @@ def drawFrameVelocities(self, frame_id: int, v_scale=0.2, color=FRAME_VEL_COLOR) def _draw_vectors_from_frame( self, - vecs: List[np.ndarray], - frame_ids: List[int], - vec_names: List[str], - colors: List[int], + vecs: list[np.ndarray], + frame_ids: list[int], + vec_names: list[str], + colors: list[int], ): """Draw vectors extending from given frames.""" import meshcat.geometry as mg diff --git a/development/scripts/misc/common_symbols.py b/development/scripts/misc/common_symbols.py index ea759d6e0c..ea4f5b0512 100755 --- a/development/scripts/misc/common_symbols.py +++ b/development/scripts/misc/common_symbols.py @@ -5,10 +5,9 @@ import itertools import pathlib import subprocess -import typing -def generate_symbols(shared_library: pathlib.Path) -> typing.Set[str]: +def generate_symbols(shared_library: pathlib.Path) -> set[str]: # Show symbol # -D: Dynamic # -C: Demangled diff --git a/doc/d-practical-exercises/src/continuous.py b/doc/d-practical-exercises/src/continuous.py index 3b1e5ed7ff..5419eab7ad 100644 --- a/doc/d-practical-exercises/src/continuous.py +++ b/doc/d-practical-exercises/src/continuous.py @@ -15,16 +15,16 @@ import tflearn from pendulum import Pendulum -### --- Random seed +# --- Random seed RANDOM_SEED = int((time.time() % 10) * 1000) -print("Seed = %d" % RANDOM_SEED) +print(f"Seed = {RANDOM_SEED}") np.random.seed(RANDOM_SEED) tf.set_random_seed(RANDOM_SEED) random.seed(RANDOM_SEED) n_init = tflearn.initializations.truncated_normal(seed=RANDOM_SEED) u_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003, seed=RANDOM_SEED) -### --- Hyper paramaters +# --- Hyper paramaters NEPISODES = 100 # Max training steps NSTEPS = 100 # Max episode length QVALUE_LEARNING_RATE = 0.001 # Base learning rate for the Q-value Network @@ -35,13 +35,13 @@ BATCH_SIZE = 64 # Number of points to be fed in stochastic gradient NH1 = NH2 = 250 # Hidden layer size -### --- Environment +# --- Environment env = Pendulum(1) # Continuous pendulum env.withSinCos = True # State is dim-3: (cosq,sinq,qdot) ... NX = env.nobs # ... training converges with q,qdot with 2x more neurones. 
 NU = env.nu  # Control is dim-1: joint torque
 
-### --- Q-value and policy networks
+# --- Q-value and policy networks
 
 
 class QValueNetwork:
@@ -63,7 +63,8 @@ def __init__(self):
         self.x = x  # Network state input in Q(x,u)
         self.u = u  # Network control input in Q(x,u)
         self.qvalue = qvalue  # Network output
-        self.variables = tf.trainable_variables()[nvars:]  # Variables to be trained
+        # Variables to be trained
+        self.variables = tf.trainable_variables()[nvars:]
         self.hidens = [netx1, netx2, netu1, netu2]  # Hidden layers for debug
 
     def setupOptim(self):
@@ -75,7 +76,8 @@ def setupOptim(self):
         self.qref = qref  # Reference Q-values
         self.optim = optim  # Optimizer
         self.gradient = (
-            gradient  # Gradient of Q wrt the control dQ/du (for policy training)
+            # Gradient of Q wrt the control dQ/du (for policy training)
+            gradient
         )
         return self
 
@@ -101,7 +103,8 @@ def __init__(self):
 
         self.x = x  # Network input in Pi(x)
         self.policy = policy  # Network output
-        self.variables = tf.trainable_variables()[nvars:]  # Variables to be trained
+        # Variables to be trained
+        self.variables = tf.trainable_variables()[nvars:]
 
     def setupOptim(self):
         qgradient = tf.placeholder(tf.float32, [None, NU])
@@ -110,7 +113,8 @@ def setupOptim(self):
             zip(grad, self.variables)
         )
 
-        self.qgradient = qgradient  # Q-value gradient wrt control (input value)
+        # Q-value gradient wrt control (input value)
+        self.qgradient = qgradient
         self.optim = optim  # Optimizer
 
         return self
@@ -122,7 +126,7 @@ def setupTargetAssign(self, nominalNet, tau=UPDATE_RATE):
         return self
 
 
-### --- Replay memory
+# --- Replay memory
 class ReplayItem:
     def __init__(self, x, u, r, d, x2):
         self.x = x
@@ -134,7 +138,7 @@ def __init__(self, x, u, r, d, x2):
 
 replayDeque = deque()
 
-### --- Tensor flow initialization
+# --- Tensor flow initialization
 
 policy = PolicyNetwork().setupOptim()
 policyTarget = PolicyNetwork().setupTargetAssign(policy)
@@ -167,24 +171,26 @@ def rendertrial(maxiter=NSTEPS, verbose=True):
     signal.SIGTSTP, lambda x, y: rendertrial()
 )  # Roll-out when CTRL-Z is pressed
 
-### History of search
+# History of search
 h_rwd = []
 h_qva = []
 h_ste = []
 
-### --- Training
+# --- Training
 for episode in range(1, NEPISODES):
     x = env.reset().T
     rsum = 0.0
 
     for step in range(NSTEPS):
-        u = sess.run(policy.policy, feed_dict={policy.x: x})  # Greedy policy ...
+        # Greedy policy ...
+        u = sess.run(policy.policy, feed_dict={policy.x: x})
         u += 1.0 / (1.0 + episode + step)  # ... with noise
         x2, r = env.step(u)
         x2 = x2.T
         done = False  # pendulum scenario is endless.
-        replayDeque.append(ReplayItem(x, u, r, done, x2))  # Feed replay memory ...
+        # Feed replay memory ...
+        replayDeque.append(ReplayItem(x, u, r, done, x2))
         if len(replayDeque) > REPLAY_SIZE:
             replayDeque.popleft()  # ... with FIFO forgetting.
@@ -260,7 +266,7 @@ def rendertrial(maxiter=NSTEPS, verbose=True):
 
 # \\\END_FOR episode in range(NEPISODES)
 
-print("Average reward during trials: %.3f" % (sum(h_rwd) / NEPISODES))
+print(f"Average reward during trials: {sum(h_rwd) / NEPISODES:.3f}")
 rendertrial()
 plt.plot(np.cumsum(h_rwd) / range(1, NEPISODES))
 plt.show()
diff --git a/doc/d-practical-exercises/src/ocp.py b/doc/d-practical-exercises/src/ocp.py
index c16beb9ce2..7e79b89a31 100644
--- a/doc/d-practical-exercises/src/ocp.py
+++ b/doc/d-practical-exercises/src/ocp.py
@@ -36,7 +36,7 @@ def display(U, verbose=False):
         env.display(x)
         time.sleep(5e-2)
         if verbose:
-            print("X%d" % i, x.T)
+            print(f"X{i}", x.T)
 
 
 class CallBack:
@@ -66,8 +66,9 @@ def setWithDisplay(self, boolean=None):
 callback = CallBack()
 signal.signal(signal.SIGTSTP, lambda x, y: callback.setWithDisplay())
 
-### --- OCP resolution
-U0 = np.zeros(NSTEPS * env.nu) - env.umax  # Initial guess for the control trajectory.
+# --- OCP resolution
+# Initial guess for the control trajectory.
+U0 = np.zeros(NSTEPS * env.nu) - env.umax
 bounds = (
     [
         [-env.umax, env.umax],
diff --git a/doc/d-practical-exercises/src/qnet.py b/doc/d-practical-exercises/src/qnet.py
index 4e1983b9b2..74be5d270d 100644
--- a/doc/d-practical-exercises/src/qnet.py
+++ b/doc/d-practical-exercises/src/qnet.py
@@ -11,25 +11,25 @@
 import tensorflow as tf
 from dpendulum import DPendulum
 
-### --- Random seed
+# --- Random seed
 RANDOM_SEED = int((time.time() % 10) * 1000)
-print("Seed = %d" % RANDOM_SEED)
+print(f"Seed = {RANDOM_SEED}")
 np.random.seed(RANDOM_SEED)
 tf.set_random_seed(RANDOM_SEED)
 
-### --- Hyper paramaters
+# --- Hyper parameters
 NEPISODES = 500  # Number of training episodes
 NSTEPS = 50  # Max episode length
 LEARNING_RATE = 0.1  # Step length in optimizer
 DECAY_RATE = 0.99  # Discount factor
 
-### --- Environment
+# --- Environment
 env = DPendulum()
 NX = env.nx
 NU = env.nu
 
 
-### --- Q-value networks
+# --- Q-value networks
 class QValueNetwork:
     def __init__(self):
         x = tf.placeholder(shape=[1, NX], dtype=tf.float32)
@@ -44,11 +44,12 @@ def __init__(self):
         self.x = x  # Network input
         self.qvalue = qvalue  # Q-value as a function of x
         self.u = u  # Policy as a function of x
-        self.qref = qref  # Reference Q-value at next step (to be set to l+Q o f)
+        # Reference Q-value at next step (to be set to l+Q o f)
+        self.qref = qref
        self.optim = optim  # Optimizer
 
 
-### --- Tensor flow initialization
+# --- Tensor flow initialization
 tf.reset_default_graph()
 qvalue = QValueNetwork()
 sess = tf.InteractiveSession()
@@ -85,16 +86,17 @@ def rendertrial(maxiter=100):
     signal.SIGTSTP, lambda x, y: rendertrial()
 )  # Roll-out when CTRL-Z is pressed
 
-### --- History of search
+# --- History of search
 h_rwd = []  # Learning history (for plot).
 
-### --- Training
+# --- Training
 for episode in range(1, NEPISODES):
     x = env.reset()
     rsum = 0.0
 
     for step in range(NSTEPS - 1):
-        u = sess.run(qvalue.u, feed_dict={qvalue.x: onehot(x)})[0]  # Greedy policy ...
+        # Greedy policy ...
+        u = sess.run(qvalue.u, feed_dict={qvalue.x: onehot(x)})[0]
         u = disturb(u, episode)  # ... with noise
         x2, reward = env.step(u)
@@ -113,9 +115,9 @@
 
     h_rwd.append(rsum)
     if not episode % 20:
-        print("Episode #%d done with %d sucess" % (episode, sum(h_rwd[-20:])))
+        print(f"Episode #{episode} done with {sum(h_rwd[-20:])} success")
 
-print("Total rate of success: %.3f" % (sum(h_rwd) / NEPISODES))
+print(f"Total rate of success: {sum(h_rwd) / NEPISODES:.3f}")
 rendertrial()
 plt.plot(np.cumsum(h_rwd) / range(1, NEPISODES))
 plt.show()
diff --git a/doc/d-practical-exercises/src/qtable.py b/doc/d-practical-exercises/src/qtable.py
index 9593bab3a4..a6d9ae9eb5 100644
--- a/doc/d-practical-exercises/src/qtable.py
+++ b/doc/d-practical-exercises/src/qtable.py
@@ -9,18 +9,18 @@
 import numpy as np
 from dpendulum import DPendulum
 
-### --- Random seed
+# --- Random seed
 RANDOM_SEED = int((time.time() % 10) * 1000)
-print("Seed = %d" % RANDOM_SEED)
+print(f"Seed = {RANDOM_SEED}")
 np.random.seed(RANDOM_SEED)
 
-### --- Hyper paramaters
+# --- Hyper parameters
 NEPISODES = 500  # Number of training episodes
 NSTEPS = 50  # Max episode length
 LEARNING_RATE = 0.85  #
 DECAY_RATE = 0.99  # Discount factor
 
-### --- Environment
+# --- Environment
 env = DPendulum()
 NX = env.nx  # Number of (discrete) states
 NU = env.nu  # Number of (discrete) controls
@@ -66,9 +66,9 @@ def rendertrial(maxiter=100):
 
     h_rwd.append(rsum)
     if not episode % 20:
-        print("Episode #%d done with %d sucess" % (episode, sum(h_rwd[-20:])))
+        print(f"Episode #{episode} done with {sum(h_rwd[-20:])} success")
 
-print("Total rate of success: %.3f" % (sum(h_rwd) / NEPISODES))
+print(f"Total rate of success: {sum(h_rwd) / NEPISODES:.3f}")
 rendertrial()
 plt.plot(np.cumsum(h_rwd) / range(1, NEPISODES))
 plt.show()
diff --git a/doc/d-practical-exercises/src/robot_hand.py b/doc/d-practical-exercises/src/robot_hand.py
index 89eae808bd..05d37690c1 100644
--- a/doc/d-practical-exercises/src/robot_hand.py
+++ b/doc/d-practical-exercises/src/robot_hand.py
@@ -367,13 +367,13 @@ def body_name(body):
         # Prepare some patches to represent collision points. Yet unvisible.
         for i in range(10):
             self.viewer.viewer.gui.addCylinder(
-                "world/wa%i" % i, 0.01, 0.003, [1.0, 0, 0, 1]
+                f"world/wa{i}", 0.01, 0.003, [1.0, 0, 0, 1]
             )
             self.viewer.viewer.gui.addCylinder(
-                "world/wb%i" % i, 0.01, 0.003, [1.0, 0, 0, 1]
+                f"world/wb{i}", 0.01, 0.003, [1.0, 0, 0, 1]
             )
-            self.viewer.viewer.gui.setVisibility("world/wa%i" % i, "OFF")
-            self.viewer.viewer.gui.setVisibility("world/wb%i" % i, "OFF")
+            self.viewer.viewer.gui.setVisibility(f"world/wa{i}", "OFF")
+            self.viewer.viewer.gui.setVisibility(f"world/wb{i}", "OFF")
 
     def checkCollision(self, pairIndex):
         ia, ib = self.collisionPairs[pairIndex]
@@ -392,10 +392,10 @@ def displayCollision(self, pairIndex, meshIndex, onlyOne=False):
         ia, ib = self.collisionPairs[pairIndex]
         va = self.visuals[ia]
         vb = self.visuals[ib]
-        va.displayCollision(self.viewer, "world/wa%i" % meshIndex)
-        vb.displayCollision(self.viewer, "world/wb%i" % meshIndex)
-        self.viewer.viewer.gui.setVisibility("world/wa%i" % meshIndex, "ON")
-        self.viewer.viewer.gui.setVisibility("world/wb%i" % meshIndex, "ON")
+        va.displayCollision(self.viewer, f"world/wa{meshIndex}")
+        vb.displayCollision(self.viewer, f"world/wb{meshIndex}")
+        self.viewer.viewer.gui.setVisibility(f"world/wa{meshIndex}", "ON")
+        self.viewer.viewer.gui.setVisibility(f"world/wb{meshIndex}", "ON")
 
     def display(self, q):
         pin.forwardKinematics(self.model, self.data, q)
diff --git a/doc/d-practical-exercises/src/ur5x4.py b/doc/d-practical-exercises/src/ur5x4.py
index 1587ada25b..2c2c2cede8 100644
--- a/doc/d-practical-exercises/src/ur5x4.py
+++ b/doc/d-practical-exercises/src/ur5x4.py
@@ -36,9 +36,7 @@ def loadRobot(M0, name):
 # Load 4 Ur5 robots, placed at 0.3m from origin in the 4 directions x,y,-x,-y.
 Mt = SE3(eye(3), np.array([0.3, 0, 0]))  # First robot is simply translated
 for i in range(4):
-    robots.append(
-        loadRobot(SE3(rotate("z", np.pi / 2 * i), zero(3)) * Mt, "robot%d" % i)
-    )
+    robots.append(loadRobot(SE3(rotate("z", np.pi / 2 * i), zero(3)) * Mt, f"robot{i}"))
 
 # Set up the robots configuration with end effector pointed upward.
 q0 = np.array([np.pi / 4, -np.pi / 4, -np.pi / 2, np.pi / 4, np.pi / 2, 0])
diff --git a/examples/inverse-kinematics-3d.py b/examples/inverse-kinematics-3d.py
index 1b4dd256bf..ba0910f3fd 100644
--- a/examples/inverse-kinematics-3d.py
+++ b/examples/inverse-kinematics-3d.py
@@ -30,7 +30,7 @@
     v = -J.T.dot(solve(J.dot(J.T) + damp * np.eye(3), err))
     q = pinocchio.integrate(model, q, v * DT)
     if not it % 10:
-        print("%d: error = %s" % (it, err.T))
+        print(f"{it}: error = {err.T}")
     it += 1
 
 if success:
diff --git a/examples/inverse-kinematics.py b/examples/inverse-kinematics.py
index 693126b47b..0710442732 100644
--- a/examples/inverse-kinematics.py
+++ b/examples/inverse-kinematics.py
@@ -30,7 +30,7 @@
     v = -J.T.dot(solve(J.dot(J.T) + damp * np.eye(6), err))
     q = pinocchio.integrate(model, q, v * DT)
     if not i % 10:
-        print("%d: error = %s" % (i, err.T))
+        print(f"{i}: error = {err.T}")
     i += 1
 
 if success:
diff --git a/examples/reachable-workspace-with-collisions.py b/examples/reachable-workspace-with-collisions.py
index 7239f377ba..0be9e2e65f 100644
--- a/examples/reachable-workspace-with-collisions.py
+++ b/examples/reachable-workspace-with-collisions.py
@@ -44,7 +44,7 @@ def XYZRPYtoSE3(xyzrpy):
     obs.meshColor = np.array(
         [1.0, 0.2, 0.2, 1.0]
     )  # Don't forget me, otherwise I am transparent ...
- obs.name = "obs%d" % i # Set object name + obs.name = f"obs{i}" # Set object name obs.parentJoint = 0 # Set object parent = 0 = universe obs.placement = XYZRPYtoSE3(xyzrpy) # Set object placement wrt parent collision_model.addGeometryObject(obs) # Add object to collision model diff --git a/include/pinocchio/math/multiprecision.hpp b/include/pinocchio/math/multiprecision.hpp index feaee818c1..08cfa8b9f6 100644 --- a/include/pinocchio/math/multiprecision.hpp +++ b/include/pinocchio/math/multiprecision.hpp @@ -118,10 +118,10 @@ namespace Eigen static int digits10() { return digits10_imp( - boost::mpl::bool_ < std::numeric_limits::digits10 - && (std::numeric_limits::digits10 != INT_MAX) - ? true - : false > ()); + boost::mpl::bool_< + std::numeric_limits::digits10 && (std::numeric_limits::digits10 != INT_MAX) + ? true + : false>()); } constexpr static inline int digits() diff --git a/models/simple_model.py b/models/simple_model.py index dac4168e35..1cdb98afd2 100644 --- a/models/simple_model.py +++ b/models/simple_model.py @@ -21,7 +21,7 @@ def placement(x=0, y=0, z=0, rx=0, ry=0, rz=0): def color(body_number=1): - return [int(i) for i in "%03d" % int(bin(body_number % 8)[2:])] + [1] + return [int(i) for i in f"{int(bin(body_number % 8)[2:]):03d}"] + [1] class ModelWrapper: diff --git a/unittest/python/test_case.py b/unittest/python/test_case.py index c6e08459d5..c4545c84f4 100644 --- a/unittest/python/test_case.py +++ b/unittest/python/test_case.py @@ -4,7 +4,7 @@ def tracefunc(frame, event, arg): - print("%s, %s: %d" % (event, frame.f_code.co_filename, frame.f_lineno)) + print(f"{event}, {frame.f_code.co_filename}: {frame.f_lineno}") return tracefunc