
sphinxify java docs for python code (PhotonVision#1575)
LucienMorey authored Nov 16, 2024
1 parent 9bbf49b commit 04191ef
Showing 10 changed files with 628 additions and 3 deletions.
105 changes: 105 additions & 0 deletions photon-lib/py/photonlibpy/estimation/openCVHelp.py
@@ -27,6 +27,12 @@ def rotationNWUtoEDN(rot: Rotation3d) -> Rotation3d:

@staticmethod
def translationToTVec(translations: list[Translation3d]) -> np.ndarray:
"""Creates a new :class:`np.array` with these 3d translations. The opencv tvec is a vector with
three elements representing {x, y, z} in the EDN coordinate system.
:param translations: The translations to convert into a np.array
"""

retVal: list[list] = []
for translation in translations:
trl = OpenCVHelp.translationNWUtoEDN(translation)
@@ -38,6 +44,13 @@ def translationToTVec(translations: list[Translation3d]) -> np.ndarray:

@staticmethod
def rotationToRVec(rotation: Rotation3d) -> np.ndarray:
"""Creates a new :class:`.np.array` with this 3d rotation. The opencv rvec Mat is a vector with
three elements representing the axis scaled by the angle in the EDN coordinate system. (angle =
norm, and axis = rvec / norm)
:param rotation: The rotation to convert into a np.array
"""

retVal: list[np.ndarray] = []
rot = OpenCVHelp.rotationNWUtoEDN(rotation)
rotVec = rot.getQuaternion().toRotationVector()
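For orientation (not part of this diff), a minimal sketch of calling these two converters, assuming robotpy's wpimath is installed and the module path matches the file layout shown above:

import math
from wpimath.geometry import Rotation3d, Translation3d
from photonlibpy.estimation.openCVHelp import OpenCVHelp

# One NWU translation (1 m forward) packed into opencv EDN tvec form.
tvecs = OpenCVHelp.translationToTVec([Translation3d(1.0, 0.0, 0.0)])

# A 90-degree NWU yaw packed into an axis-angle rvec (angle = norm of the vector).
rvec = OpenCVHelp.rotationToRVec(Rotation3d(0.0, 0.0, math.radians(90.0)))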
@@ -88,6 +101,25 @@ def projectPoints(
def reorderCircular(
elements: list[Any] | np.ndarray, backwards: bool, shiftStart: int
) -> list[Any]:
"""Reorders the list, optionally indexing backwards and wrapping around to the last element after
the first, and shifting all indices in the direction of indexing.
e.g.
({1,2,3}, false, 1) == {2,3,1}
({1,2,3}, true, 0) == {1,3,2}
({1,2,3}, true, 1) == {3,2,1}
:param elements: list elements
:param backwards: If indexing should happen in reverse (0, size-1, size-2, ...)
:param shiftStart: How much the initial index should be shifted (instead of starting at index 0,
start at shiftStart, negated if backwards)
:returns: Reordered list
"""

size = len(elements)
reordered = []
dir = -1 if backwards else 1
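A small sketch (not part of the diff) exercising the documented examples; the expected results in the comments are taken from the docstring above:

from photonlibpy.estimation.openCVHelp import OpenCVHelp

print(OpenCVHelp.reorderCircular([1, 2, 3], False, 1))  # [2, 3, 1]
print(OpenCVHelp.reorderCircular([1, 2, 3], True, 0))   # [1, 3, 2]
print(OpenCVHelp.reorderCircular([1, 2, 3], True, 1))   # [3, 2, 1]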
@@ -100,18 +132,39 @@ def reorderCircular(

@staticmethod
def translationEDNToNWU(trl: Translation3d) -> Translation3d:
"""Convert a rotation delta from EDN to NWU. For example, if you have a rotation X,Y,Z {1, 0, 0}
in EDN, this would be {0, -1, 0} in NWU.
"""

return trl.rotateBy(EDN_TO_NWU)

@staticmethod
def rotationEDNToNWU(rot: Rotation3d) -> Rotation3d:
"""Convert a rotation delta from NWU to EDN. For example, if you have a rotation X,Y,Z {1, 0, 0}
in NWU, this would be {0, 0, 1} in EDN.
"""

return -EDN_TO_NWU + (rot + EDN_TO_NWU)

@staticmethod
def tVecToTranslation(tvecInput: np.ndarray) -> Translation3d:
"""Returns a new 3d translation from this :class:`.Mat`. The opencv tvec is a vector with three
elements representing {x, y, z} in the EDN coordinate system.
:param tvecInput: The tvec to create a Translation3d from
"""

return OpenCVHelp.translationEDNToNWU(Translation3d(tvecInput))

@staticmethod
def rVecToRotation(rvecInput: np.ndarray) -> Rotation3d:
"""Returns a 3d rotation from this :class:`.Mat`. The opencv rvec Mat is a vector with three
elements representing the axis scaled by the angle in the EDN coordinate system. (angle = norm,
and axis = rvec / norm)
:param rvecInput: The rvec to create a Rotation3d from
"""

return OpenCVHelp.rotationEDNToNWU(Rotation3d(rvecInput))

@staticmethod
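A round-trip sketch (not part of this diff) showing how the EDN vectors map back to wpilib's NWU types; the (3, 1) shapes mirror what cv2.solvePnP returns:

import math
import numpy as np
from photonlibpy.estimation.openCVHelp import OpenCVHelp

# 1 m along the camera's EDN +Z (forward) maps to roughly +X (forward) in NWU.
trl = OpenCVHelp.tVecToTranslation(np.array([[0.0], [0.0], [1.0]]))

# A 90-degree rotation about EDN +Y (down) maps to roughly a -90 degree NWU yaw.
rot = OpenCVHelp.rVecToRotation(np.array([[0.0], [math.radians(90.0)], [0.0]]))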
@@ -121,6 +174,33 @@ def solvePNP_Square(
modelTrls: list[Translation3d],
imagePoints: np.ndarray,
) -> PnpResult | None:
"""Finds the transformation(s) that map the camera's pose to the target's pose. The camera's pose
relative to the target is determined by the supplied 3d points of the target's model and their
associated 2d points imaged by the camera. The supplied model translations must be relative to
the target's pose.
For planar targets, there may be an alternate solution which is plausible given the 2d image
points. This has an associated "ambiguity" which describes the ratio of reprojection error
between the "best" and "alternate" solution.
This method is intended for use with individual AprilTags, and will not work unless 4 points
are provided.
:param cameraMatrix: The camera intrinsics matrix in standard opencv form
:param distCoeffs: The camera distortion matrix in standard opencv form
:param modelTrls: The translations of the object corners. These should have the object pose as
their origin. These must come in a specific, pose-relative order (in NWU):
- Point 0: [0, -squareLength / 2, squareLength / 2]
- Point 1: [0, squareLength / 2, squareLength / 2]
- Point 2: [0, squareLength / 2, -squareLength / 2]
- Point 3: [0, -squareLength / 2, -squareLength / 2]
:param imagePoints: The projection of these 3d object points into the 2d camera image. The order
should match the given object point translations.
:returns: The resulting transformation that maps the camera pose to the target pose and the
ambiguity if an alternate solution is available.
"""
modelTrls = OpenCVHelp.reorderCircular(modelTrls, True, -1)
imagePoints = np.array(OpenCVHelp.reorderCircular(imagePoints, True, -1))
objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))
@@ -130,6 +210,7 @@ def solvePNP_Square(
best: Transform3d = Transform3d()

for tries in range(2):
# calc rvecs/tvecs and associated reprojection error from image points
retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
objectMat,
imagePoints,
@@ -138,6 +219,7 @@
flags=cv.SOLVEPNP_IPPE_SQUARE,
)

# convert to wpilib coordinates
best = Transform3d(
OpenCVHelp.tVecToTranslation(tvecs[0]),
OpenCVHelp.rVecToRotation(rvecs[0]),
@@ -148,6 +230,7 @@
OpenCVHelp.rVecToRotation(rvecs[1]),
)

# check if we got a NaN result
if reprojectionError is not None and not math.isnan(
reprojectionError[0, 0]
):
@@ -158,6 +241,7 @@
pt[0, 1] -= 0.001
imagePoints[0] = pt

# solvePnP failed
if reprojectionError is None or math.isnan(reprojectionError[0, 0]):
print("SolvePNP_Square failed!")
return None
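A usage sketch (not part of this diff) for a single square AprilTag; the intrinsics, distortion, and pixel coordinates are placeholders, and the PnpResult field names are assumed:

import numpy as np
from wpimath.geometry import Translation3d
from photonlibpy.estimation.openCVHelp import OpenCVHelp

s = 0.1651  # illustrative tag edge length in meters
# Corner order required by the docstring above (NWU, relative to the tag pose).
modelTrls = [
    Translation3d(0.0, -s / 2.0, s / 2.0),
    Translation3d(0.0, s / 2.0, s / 2.0),
    Translation3d(0.0, s / 2.0, -s / 2.0),
    Translation3d(0.0, -s / 2.0, -s / 2.0),
]
cameraMatrix = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])
distCoeffs = np.zeros((8, 1))  # placeholder: no distortion
# Detected corner pixels, shape (N, 1, 2), in the same order as modelTrls (placeholders).
imagePoints = np.array(
    [[[300.0, 200.0]], [[340.0, 200.0]], [[340.0, 240.0]], [[300.0, 240.0]]]
)

result = OpenCVHelp.solvePNP_Square(cameraMatrix, distCoeffs, modelTrls, imagePoints)
if result is not None:
    camToTag = result.best        # assumed field name
    ambiguity = result.ambiguity  # assumed field name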
@@ -186,6 +270,27 @@ def solvePNP_SQPNP(
modelTrls: list[Translation3d],
imagePoints: np.ndarray,
) -> PnpResult | None:
"""Finds the transformation that maps the camera's pose to the origin of the supplied object. An
"object" is simply a set of known 3d translations that correspond to the given 2d points. If,
for example, the object translations are given relative to the close-right corner of the blue
alliance (the default origin), a camera-to-origin transformation is returned. If the
translations are relative to a target's pose, a camera-to-target transformation is returned.
There must be at least 3 points to use this method. This does not return an alternate
solution; if you intend to use solvePNP on a single AprilTag, see :meth:`solvePNP_Square`
instead.
:param cameraMatrix: The camera intrinsics matrix in standard opencv form
:param distCoeffs: The camera distortion matrix in standard opencv form
:param modelTrls: The translations of the object corners, relative to the field.
:param imagePoints: The projection of these 3d object points into the 2d camera image. The order
should match the given object point translations.
:returns: The resulting transformation that maps the camera pose to the target pose. If the 3d
model points are supplied relative to the origin, this transformation brings the camera to
the origin.
"""

objectMat = np.array(OpenCVHelp.translationToTVec(modelTrls))

retval, rvecs, tvecs, reprojectionError = cv.solvePnPGeneric(
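A companion sketch (also outside this diff) for the multi-point variant; the field-relative corners and pixel values are illustrative and the result field name is assumed:

import numpy as np
from wpimath.geometry import Translation3d
from photonlibpy.estimation.openCVHelp import OpenCVHelp

# At least three known field-relative corner translations and their matching
# (N, 1, 2) pixel observations (placeholder values).
modelTrls = [
    Translation3d(16.0, 1.0, 0.5),
    Translation3d(16.0, 1.2, 0.5),
    Translation3d(16.0, 1.2, 0.7),
    Translation3d(16.0, 1.0, 0.7),
]
imagePoints = np.array(
    [[[310.0, 230.0]], [[350.0, 230.0]], [[350.0, 190.0]], [[310.0, 190.0]]]
)
cameraMatrix = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])
distCoeffs = np.zeros((8, 1))

result = OpenCVHelp.solvePNP_SQPNP(cameraMatrix, distCoeffs, modelTrls, imagePoints)
if result is not None:
    camToOrigin = result.best  # assumed field name; invert for the camera's field pose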
19 changes: 19 additions & 0 deletions photon-lib/py/photonlibpy/estimation/rotTrlTransform3d.py
@@ -4,24 +4,38 @@


class RotTrlTransform3d:
"""Represents a transformation that first rotates a pose around the origin, and then translates it."""

def __init__(
self, rot: Rotation3d = Rotation3d(), trl: Translation3d = Translation3d()
):
"""A rotation-translation transformation.
Applying this RotTrlTransform3d to poses will preserve their current origin-to-pose
transform as if the origin was transformed by these components instead.
:param rot: The rotation component
:param trl: The translation component
"""
self.rot = rot
self.trl = trl

def inverse(self) -> Self:
"""The inverse of this transformation. Applying the inverse will "undo" this transformation."""
invRot = -self.rot
invTrl = -(self.trl.rotateBy(invRot))
return type(self)(invRot, invTrl)

def getTransform(self) -> Transform3d:
"""This transformation as a Transform3d (as if of the origin)"""
return Transform3d(self.trl, self.rot)

def getTranslation(self) -> Translation3d:
"""The translation component of this transformation"""
return self.trl

def getRotation(self) -> Rotation3d:
"""The rotation component of this transformation"""
return self.rot

def applyTranslation(self, trlToApply: Translation3d) -> Translation3d:
@@ -44,6 +58,11 @@ def applyTrls(self, rots: list[Rotation3d]) -> list[Rotation3d]:

@classmethod
def makeRelativeTo(cls, pose: Pose3d) -> Self:
"""The rotation-translation transformation that makes poses in the world consider this pose as the
new origin, or change the basis to this pose.
:param pose: The new origin
"""
return cls(pose.rotation(), pose.translation()).inverse()

@classmethod
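A small sketch (outside this diff) of how this class might be used to re-express a pose in another pose's frame; the poses below are illustrative:

import math
from wpimath.geometry import Pose3d, Rotation3d, Translation3d
from photonlibpy.estimation.rotTrlTransform3d import RotTrlTransform3d

camPose = Pose3d(Translation3d(1.0, 0.0, 0.5), Rotation3d(0.0, 0.0, math.radians(10.0)))
tagPose = Pose3d(Translation3d(4.0, 1.0, 1.0), Rotation3d(0.0, 0.0, math.radians(180.0)))

# Change of basis: treat the camera pose as the new origin...
camFrame = RotTrlTransform3d.makeRelativeTo(camPose)
# ...then express the tag's translation in that frame.
tagInCamFrame = camFrame.applyTranslation(tagPose.translation())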
63 changes: 63 additions & 0 deletions photon-lib/py/photonlibpy/estimation/targetModel.py
@@ -8,14 +8,27 @@


class TargetModel:
"""Describes the 3d model of a target."""

def __init__(self):
"""Default constructor for initialising internal class members. DO NOT USE THIS!!! USE THE createPlanar,
createCuboid, createSpheroid or create Arbitrary
"""
self.vertices: List[Translation3d] = []
self.isPlanar = False
self.isSpherical = False

@classmethod
def createPlanar(cls, width: meters, height: meters) -> Self:
"""Creates a rectangular, planar target model given the width and height. The model has four
vertices:
- Point 0: [0, -width/2, -height/2]
- Point 1: [0, width/2, -height/2]
- Point 2: [0, width/2, height/2]
- Point 3: [0, -width/2, height/2]
"""

tm = cls()

tm.isPlanar = True
@@ -30,6 +43,18 @@ def createPlanar(cls, width: meters, height: meters) -> Self:

@classmethod
def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:
"""Creates a cuboid target model given the length, width, height. The model has eight vertices:
- Point 0: [length/2, -width/2, -height/2]
- Point 1: [length/2, width/2, -height/2]
- Point 2: [length/2, width/2, height/2]
- Point 3: [length/2, -width/2, height/2]
- Point 4: [-length/2, -width/2, height/2]
- Point 5: [-length/2, width/2, height/2]
- Point 6: [-length/2, width/2, -height/2]
- Point 7: [-length/2, -width/2, -height/2]
"""

tm = cls()
verts = [
Translation3d(length / 2.0, -width / 2.0, -height / 2.0),
@@ -48,6 +73,20 @@ def createCuboid(cls, length: meters, width: meters, height: meters) -> Self:

@classmethod
def createSpheroid(cls, diameter: meters) -> Self:
"""Creates a spherical target model which has similar dimensions regardless of its rotation. This
model has four vertices:
- Point 0: [0, -radius, 0]
- Point 1: [0, 0, -radius]
- Point 2: [0, radius, 0]
- Point 3: [0, 0, radius]
*Q: Why these vertices?* A: This target should be oriented to the camera every frame, much
like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These vertices
are used for drawing the image of this sphere, but do not match the corners that will be
published by photonvision.
"""

tm = cls()

tm.isPlanar = False
@@ -63,6 +102,14 @@ def createSpheroid(cls, diameter: meters) -> Self:

@classmethod
def createArbitrary(cls, verts: List[Translation3d]) -> Self:
"""Creates a target model from arbitrary 3d vertices. Automatically determines if the given
vertices are planar(x == 0). More than 2 vertices must be given. If this is a planar model, the
vertices should define a non-intersecting contour.
:param vertices: Translations representing the vertices of this target model relative to its
pose.
"""

tm = cls()
tm._common_construction(verts)

@@ -83,6 +130,12 @@ def _common_construction(self, verts: List[Translation3d]) -> None:
self.vertices = verts

def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:
"""This target's vertices offset from its field pose.
Note: If this target is spherical, use :meth:`getOrientedPose` with this method.
"""

basisChange = RotTrlTransform3d(targetPose.rotation(), targetPose.translation())

retVal = []
@@ -94,6 +147,16 @@ def getFieldVertices(self, targetPose: Pose3d) -> List[Translation3d]:

@classmethod
def getOrientedPose(cls, tgtTrl: Translation3d, cameraTrl: Translation3d):
"""Returns a Pose3d with the given target translation oriented (with its relative x-axis aligned)
to the camera translation. This is used for spherical targets which should not have their
projection change regardless of their own rotation.
:param tgtTrl: This target's translation
:param cameraTrl: Camera's translation
:returns: This target's pose oriented to the camera
"""

relCam = cameraTrl - tgtTrl
orientToCam = Rotation3d(
0.0,
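Finally, a sketch (not part of this diff) building a planar tag model and projecting its corners into field space; the tag size and pose are illustrative:

import math
from wpimath.geometry import Pose3d, Rotation3d, Translation3d
from photonlibpy.estimation.targetModel import TargetModel

# A square, planar target roughly the size of a printed AprilTag (placeholder size).
tagModel = TargetModel.createPlanar(width=0.1651, height=0.1651)

tagPose = Pose3d(Translation3d(5.0, 2.0, 1.0), Rotation3d(0.0, 0.0, math.radians(180.0)))
corners = tagModel.getFieldVertices(tagPose)  # four Translation3d field-space corners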