diff --git a/P2/src/praktikum2/praktikum2/p2_aruco_marker_detection.py b/P2/src/praktikum2/praktikum2/p2_aruco_marker_detection.py
index d82c009..d39c955 100755
--- a/P2/src/praktikum2/praktikum2/p2_aruco_marker_detection.py
+++ b/P2/src/praktikum2/praktikum2/p2_aruco_marker_detection.py
@@ -10,89 +10,75 @@ import numpy as np
 class ArucoDetect(Node):
     def __init__(self):
-        # Init node
+
+        # init node
         super().__init__("aruco_node")
+        # Initialise the bridge for ROS <-> OpenCV conversion
         self.bridge = CvBridge()
-        # Load dictionary for aruco marker in 6x6_50 format (as specified in the PDF)
-        # Note: the PDF uses the object-oriented API (newer OpenCV versions)
-        self.ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
-        self.aruco_params = cv2.aruco.DetectorParameters()
-        self.aruco_detector = cv2.aruco.ArucoDetector(self.ar_dict, self.aruco_params)
+        # load dictionary for aruco marker in 6x6_50 format
+        ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
+        self.aruco_detector = cv2.aruco.ArucoDetector(ar_dict)
 
-        # TODO: create a subscriber for gripper camera image (sensor_msgs/Image)
-        # According to page 2 of the PDF, the topic is: /SpotArm/gripper_camera/image_color
+        # TODO: create a subscriber for gripper camera image (sensor_msgs/Image) and connect to camera_cb
+        '''
+        create_subscription(
+            msg_type: Any,
+            topic: str,
+            callback: (MsgType@create_subscription) -> None,
+            qos_profile: QoSProfile | int
+        )
+        '''
         self.create_subscription(
             Image,
             '/SpotArm/gripper_camera/image_color',
             self.camera_cb,
             10
         )
+        self.get_logger().info("Aruco node started, waiting for images...")
+
     def camera_cb(self, msg):
         # Convert from ROS msg to OpenCV image
-        try:
-            # 'bgr8' is the standard colour format for OpenCV
-            spot_gripper_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
-        except Exception as e:
-            self.get_logger().error(f"Image conversion failed: {e}")
-            return
+        spot_gripper_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
 
         # Performs marker detection, returns coordinates of corner points
-        corners, ids, rejected = self.aruco_detector.detectMarkers(spot_gripper_img)
+        (corners, ids, rejected) = self.aruco_detector.detectMarkers(spot_gripper_img)
 
-        # Check whether any markers were found
-        if len(corners) > 0:  # the PDF has a typo here ($>8$); >0 is the logically correct check
-
-            # We take the first detected marker
-            # flatten the ArUco IDs list
-            ids = ids.flatten()
+        if len(corners) > 0:  # only if marker detected
+            corners = corners[0].squeeze()
+            ptA, ptB, ptC, ptD = corners
+            ptB = (int(ptB[0]), int(ptB[1]))
+            ptC = (int(ptC[0]), int(ptC[1]))
+            ptD = (int(ptD[0]), int(ptD[1]))
+            ptA = (int(ptA[0]), int(ptA[1]))
 
-            # loop over the detected ArUCo corners
-            for (markerCorner, markerID) in zip(corners, ids):
-                # extract the marker corners (which are always returned in
-                # top-left, top-right, bottom-right, and bottom-left order)
-                corners_squeezed = markerCorner.reshape((4, 2))
-                (topLeft, topRight, bottomRight, bottomLeft) = corners_squeezed
+            # TODO: draw a coloured bounding box around the marker
+            # Draw lines between the corner points (green: (0, 255, 0), thickness: 2)
+            cv2.line(spot_gripper_img, ptA, ptB, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptB, ptC, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptC, ptD, (0, 255, 0), 2)
+            cv2.line(spot_gripper_img, ptD, ptA, (0, 255, 0), 2)
 
-                # convert each of the (x, y)-coordinate pairs to integers
-                ptA = (int(topLeft[0]), int(topLeft[1]))
-                ptB = (int(topRight[0]), int(topRight[1]))
-                ptC = (int(bottomRight[0]), int(bottomRight[1]))
-                ptD = (int(bottomLeft[0]), int(bottomLeft[1]))
+            # Log that a marker was found
+            # self.get_logger().info(f"Marker ID {markerID} found.")
 
-                # TODO: draw a coloured bounding box around the marker
-                # Draw lines between the corner points (green: (0, 255, 0), thickness: 2)
-                cv2.line(spot_gripper_img, ptA, ptB, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptB, ptC, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptC, ptD, (0, 255, 0), 2)
-                cv2.line(spot_gripper_img, ptD, ptA, (0, 255, 0), 2)
+        # TODO: resize image for display
+        # Only resize if the image is wider than 800 px, otherwise keep the original
+        max_width = 800
+        if spot_gripper_img.shape[1] > max_width:
+            scale_percent = max_width / spot_gripper_img.shape[1]
 
-                # Optional: draw the centre point and the ID (helps with debugging)
-                cX = int((topLeft[0] + bottomRight[0]) / 2.0)
-                cY = int((topLeft[1] + bottomRight[1]) / 2.0)
-                cv2.circle(spot_gripper_img, (cX, cY), 4, (0, 0, 255), -1)
-                cv2.putText(spot_gripper_img, str(markerID),
-                    (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX,
-                    0.5, (0, 255, 0), 2)
-
-                # Log that a marker was found
-                # self.get_logger().info(f"Marker ID {markerID} found.")
+            width = max_width
+            height = int(spot_gripper_img.shape[0] * scale_percent)
+
+            dim = (width, height)
 
-        # TODO: resize image for display
-        # The gripper image can be large, so scale it down for display (e.g. 50%)
-        # Or to a fixed size: (640, 480)
-        scale_percent = 50  # percent of the original size
-        width = int(spot_gripper_img.shape[1] * scale_percent / 100)
-        height = int(spot_gripper_img.shape[0] * scale_percent / 100)
-        dim = (width, height)
-
-        # Only resize if the image is very large, otherwise keep the original
-        if spot_gripper_img.shape[1] > 800:
             resized_img = cv2.resize(spot_gripper_img, dim, interpolation = cv2.INTER_AREA)
+        else:
             resized_img = spot_gripper_img
@@ -105,16 +91,9 @@ class ArucoDetect(Node):
 def main(args = None):
     rclpy.init(args=args)
     node = ArucoDetect()
-
-    try:
-        rclpy.spin(node)
-    except KeyboardInterrupt:
-        pass
-    finally:
-        # Cleanup
-        node.destroy_node()
-        rclpy.shutdown()
-        cv2.destroyAllWindows()
+    rclpy.spin(node)
+    rclpy.shutdown()
+
 if __name__ == "__main__":
     main()
\ No newline at end of file
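
For reference, a minimal standalone sketch of the new-style ArUco detection path the patch adopts (predefined dictionary + `ArucoDetector` + `detectMarkers`) and of the corner layout that `camera_cb` relies on. It assumes opencv-contrib-python >= 4.7; the marker id (23), the 300 px marker size, and the synthetic test image are illustrative choices, not values from the patch.

```python
import cv2

ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
detector = cv2.aruco.ArucoDetector(ar_dict)  # default DetectorParameters, as in the patch

# Render a synthetic 6x6_50 marker and pad it with a white quiet zone so the
# detector can find the black border (id and size are arbitrary demo values).
marker = cv2.aruco.generateImageMarker(ar_dict, 23, 300)
img = cv2.copyMakeBorder(marker, 50, 50, 50, 50, cv2.BORDER_CONSTANT, value=255)

corners, ids, rejected = detector.detectMarkers(img)
if ids is not None:
    # Each entry of `corners` has shape (1, 4, 2); squeeze() yields the four
    # corners in top-left, top-right, bottom-right, bottom-left order, which is
    # exactly how camera_cb unpacks them into ptA..ptD.
    ptA, ptB, ptC, ptD = corners[0].squeeze()
    print("id:", int(ids.flatten()[0]))             # -> 23
    print("top-left:", tuple(int(v) for v in ptA))
```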
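Likewise, a tiny worked example of the width-capped, aspect-preserving resize introduced in `camera_cb`, using a hypothetical 1920x1080 stand-in frame (the gripper camera's actual resolution is not stated in the patch):

```python
import cv2
import numpy as np

frame = np.zeros((1080, 1920, 3), dtype=np.uint8)    # stand-in for spot_gripper_img
max_width = 800
if frame.shape[1] > max_width:
    scale = max_width / frame.shape[1]               # 800 / 1920 ≈ 0.4167
    dim = (max_width, int(frame.shape[0] * scale))   # (800, 450); cv2.resize expects (w, h)
    resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
else:
    resized = frame

print(resized.shape)                                 # (450, 800, 3)
```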