P2.1 Marker Detection final

Safak Hazinedar committed 2025-12-07 17:51:59 +01:00
parent 038c606f27
commit 4c9f879bde


@@ -10,89 +10,75 @@ import numpy as np
 class ArucoDetect(Node):
     def __init__(self):
-        # Init node
+        #init node
         super().__init__("aruco_node")
         # Initialise the bridge for ROS <-> OpenCV conversion
         self.bridge = CvBridge()
-        # Load dictionary for aruco marker in 6x6_50 format (as specified in the PDF)
-        # Note: the PDF uses the object-oriented API (newer OpenCV versions)
-        self.ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
-        self.aruco_params = cv2.aruco.DetectorParameters()
-        self.aruco_detector = cv2.aruco.ArucoDetector(self.ar_dict, self.aruco_params)
-        # TODO: create a subscriber for gripper camera image (sensor_msgs/Image)
-        # According to page 2 of the PDF the topic is: /SpotArm/gripper_camera/image_color
+        #load dictionary for aruco marker in 6x6_50 format
+        ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
+        self.aruco_detector = cv2.aruco.ArucoDetector(ar_dict)
+        # TODO: create a subscriber for gripper camera image (sensor_msgs/Image) and connect to camera_cb
+        '''
+        create_subscription(
+            msg_type: Any,
+            topic: str,
+            callback: (MsgType@create_subscription) -> None,
+            qos_profile: QoSProfile | int
+        )
+        '''
         self.create_subscription(
             Image,
             '/SpotArm/gripper_camera/image_color',
             self.camera_cb,
             10
         )
         self.get_logger().info("Aruco node started, waiting for images...")

     def camera_cb(self, msg):
         # Convert from ROS msg to OpenCV image
-        try:
-            # 'bgr8' is the default colour format for OpenCV
-            spot_gripper_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
-        except Exception as e:
-            self.get_logger().error(f"Error during image conversion: {e}")
-            return
+        spot_gripper_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
         # Performs marker detection, returns coordinates of corner points
-        corners, ids, rejected = self.aruco_detector.detectMarkers(spot_gripper_img)
-        # Check whether any markers were found
-        if len(corners) > 0:  # the PDF has a typo here (> 8); > 0 is the logically correct check
-            # We take the first detected marker
-            # flatten the ArUco IDs list
-            ids = ids.flatten()
-            # loop over the detected ArUCo corners
-            for (markerCorner, markerID) in zip(corners, ids):
-                # extract the marker corners (which are always returned in
-                # top-left, top-right, bottom-right, and bottom-left order)
-                corners_squeezed = markerCorner.reshape((4, 2))
-                (topLeft, topRight, bottomRight, bottomLeft) = corners_squeezed
-                # convert each of the (x, y)-coordinate pairs to integers
-                ptA = (int(topLeft[0]), int(topLeft[1]))
-                ptB = (int(topRight[0]), int(topRight[1]))
-                ptC = (int(bottomRight[0]), int(bottomRight[1]))
-                ptD = (int(bottomLeft[0]), int(bottomLeft[1]))
-                # TODO: draw a coloured bounding box around the marker
-                # Draw lines between the corner points (green: (0, 255, 0), thickness: 2)
-                cv2.line(spot_gripper_img, ptA, ptB, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptB, ptC, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptC, ptD, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptD, ptA, (0, 255, 0), 2)
-                # Optional: draw the centre point and the ID (helps with debugging)
-                cX = int((topLeft[0] + bottomRight[0]) / 2.0)
-                cY = int((topLeft[1] + bottomRight[1]) / 2.0)
-                cv2.circle(spot_gripper_img, (cX, cY), 4, (0, 0, 255), -1)
-                cv2.putText(spot_gripper_img, str(markerID),
-                            (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX,
-                            0.5, (0, 255, 0), 2)
-                # Log that something was found
-                # self.get_logger().info(f"Marker ID {markerID} found.")
-        # TODO: resize image for display
-        # The gripper image can be large, so scale it down for display (e.g. 50%)
-        # Or to a fixed size: (640, 480)
-        scale_percent = 50 # percent of the original size
-        width = int(spot_gripper_img.shape[1] * scale_percent / 100)
-        height = int(spot_gripper_img.shape[0] * scale_percent / 100)
-        dim = (width, height)
-        # Only resize if the image is very large, otherwise keep the original
-        if spot_gripper_img.shape[1] > 800:
+        (corners, ids, rejected) = self.aruco_detector.detectMarkers(spot_gripper_img)
+        if len(corners) > 0: #only if marker detected
+            corners = corners[0].squeeze()
+            ptA, ptB, ptC, ptD = corners
+            ptB = (int(ptB[0]), int(ptB[1]))
+            ptC = (int(ptC[0]), int(ptC[1]))
+            ptD = (int(ptD[0]), int(ptD[1]))
+            ptA = (int(ptA[0]), int(ptA[1]))
+            # TODO: draw a coloured bounding box around the marker
+            # Draw lines between the corner points (green: (0, 255, 0), thickness: 2)
+            cv2.line(spot_gripper_img, ptA, ptB, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptB, ptC, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptC, ptD, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptD, ptA, (0, 255, 0), 2)
+            # Log that something was found
+            # self.get_logger().info(f"Marker ID {markerID} found.")
+        # TODO: resize image for display
+        # Only resize if the image is wider than 800 px, otherwise keep the original
+        max_width = 800
+        if spot_gripper_img.shape[1] > max_width:
+            scale_percent = max_width / spot_gripper_img.shape[1]
+            width = max_width
+            height = int(spot_gripper_img.shape[0] * scale_percent)
+            dim = (width, height)
             resized_img = cv2.resize(spot_gripper_img, dim, interpolation = cv2.INTER_AREA)
         else:
             resized_img = spot_gripper_img
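
Note: the manual corner unpacking and the four cv2.line calls in this hunk can also be replaced by OpenCV's built-in helper cv2.aruco.drawDetectedMarkers, which draws the outline and ID of every detection in one call. A minimal sketch of the same detection flow outside ROS, assuming OpenCV >= 4.7 (for the ArucoDetector API) and a saved test frame; the file name gripper_snapshot.png is a placeholder:

    import cv2

    ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
    detector = cv2.aruco.ArucoDetector(ar_dict)

    frame = cv2.imread("gripper_snapshot.png")  # placeholder test image
    corners, ids, rejected = detector.detectMarkers(frame)
    if ids is not None:
        # Draws the outlines and marker IDs in one call instead of four cv2.line calls
        cv2.aruco.drawDetectedMarkers(frame, corners, ids)
    cv2.imshow("aruco", frame)
    cv2.waitKey(0)

detectMarkers accepts the same BGR frames that CvBridge produces, so the two drawing approaches are interchangeable.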
@@ -105,16 +91,9 @@ class ArucoDetect(Node):
 def main(args = None):
     rclpy.init(args=args)
     node = ArucoDetect()
-    try:
-        rclpy.spin(node)
-    except KeyboardInterrupt:
-        pass
-    finally:
-        # Cleanup
-        node.destroy_node()
-        rclpy.shutdown()
-        cv2.destroyAllWindows()
+    rclpy.spin(node)
+    rclpy.shutdown()

 if __name__ == "__main__":
     main()
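
A quick way to sanity-check the detector without the Spot gripper camera is to generate a DICT_6X6_50 marker synthetically and run it back through the detector. A minimal sketch, assuming OpenCV >= 4.7; marker ID 7 and the pixel sizes are arbitrary choices:

    import cv2

    ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
    detector = cv2.aruco.ArucoDetector(ar_dict)

    # Render marker ID 7 at 200x200 px, then add a white margin so the
    # detector can see the marker's outer black border
    marker = cv2.aruco.generateImageMarker(ar_dict, 7, 200)
    canvas = cv2.copyMakeBorder(marker, 50, 50, 50, 50, cv2.BORDER_CONSTANT, value=255)

    corners, ids, rejected = detector.detectMarkers(canvas)
    print(ids)  # expected: [[7]]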