import random

import cv2
import numpy as np
from PyQt5.QtCore import QPointF
from PyQt5.QtWidgets import QDialog

from point import Point
from ui.calibration_dialog import CalibrationDialog


class Calibration:
    """Manages calibration of image coordinates to real-world coordinates."""

    def __init__(self, main_window):
        self.main_window = main_window
        self.calibration_points = []  # Point objects carrying image + real coordinates
        self.transformation_matrix = None  # 3x3 homography: image -> real
        self.inverse_transformation_matrix = None  # 3x3 homography: real -> image
        self.calibration_done = False
        self.automatic_calibration_mode = False
        self.calibration_cancelled = False

    def add_calibration_point(self, point: QPointF, automatic=False):
        """Adds a calibration point and opens a dialog to enter its real-world coordinates."""
        if len(self.calibration_points) < 4:  # Exactly 4 calibration points are collected
            point_obj = Point(point, point_type='calibration')
            self.calibration_points.append(point_obj)

            self.main_window.image_view.highlight_point(point_obj)
            self.main_window.image_view.update_scene()
            dialog = CalibrationDialog(self.main_window, automatic_calibration=automatic)
            dialog.setWindowTitle(f"Enter Real Coordinates for Point {len(self.calibration_points)}")
            result = dialog.exec()
            if result == QDialog.Accepted:
                real_x, real_y = dialog.real_coordinates
                point_obj.set_real_coordinates(QPointF(real_x, real_y))
                self.main_window.image_view.delete_highlight(point_obj)
                self.main_window.image_view.draw_calibration_points(self.calibration_points)
                if len(self.calibration_points) == 4:  # Calculate and refine the transformation matrix
                    self.calculate_transformation_matrix()
                    self.refine_calibration()
            elif result == 1000:  # Custom dialog code: discard this point and pick another
                self.calibration_points.pop()
                self.main_window.image_view.delete_highlight(point_obj)
                new_point = self.select_random_point()
                if new_point is not None and not self.calibration_cancelled:
                    self.add_calibration_point(new_point, automatic=True)
            else:  # Dialog rejected: abort calibration
                self.calibration_points.pop()
                self.main_window.image_view.delete_highlight(point_obj)
                if automatic:
                    self.calibration_cancelled = True  # Mark calibration as cancelled
                self.clear_calibration_points()

    def clear_calibration_points(self):
        """Clears all calibration points."""
        self.main_window.image_view.clear_calibration_points()
        self.calibration_points = []
        self.calibration_done = False

    def calculate_transformation_matrix(self):
        """Calculates the transformation matrix based on calibration points."""
        if len(self.calibration_points) != 4:
            raise ValueError("Exactly 4 calibration points are required to calculate the transformation matrix.")

        image_points = np.array(
            [[p.get_image_coordinates().x(), p.get_image_coordinates().y()] for p in self.calibration_points],
            dtype=np.float32)
        real_coords = np.array(
            [[p.get_real_coordinates().x(), p.get_real_coordinates().y()] for p in self.calibration_points],
            dtype=np.float32)

        self.transformation_matrix, _ = cv2.findHomography(image_points, real_coords)
        if self.transformation_matrix is None:
            raise RuntimeError("Homography estimation failed; check that the calibration points are not collinear.")
        self.inverse_transformation_matrix = np.linalg.inv(self.transformation_matrix)

        self.calibration_done = True
        self.main_window.interpolationAction.setEnabled(True)

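    # The homography H computed above is a 3x3 matrix that maps homogeneous
    # image coordinates to real-world coordinates:
    #
    #     [x', y', w]^T = H @ [u, v, 1]^T,  then  (x_real, y_real) = (x'/w, y'/w)
    #
    # cv2.perspectiveTransform applies this product and division internally,
    # which is why the transform methods below pass plain (u, v) pairs.
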
    def refine_calibration(self, iterations=500):
        """Refines the transformation matrix by re-estimating the homography with RANSAC."""
        if len(self.calibration_points) != 4:
            raise ValueError("Exactly 4 calibration points are required to refine the calibration matrix.")

        image_points = np.array(
            [[p.get_image_coordinates().x(), p.get_image_coordinates().y()] for p in self.calibration_points],
            dtype=np.float32)
        real_coords = np.array(
            [[p.get_real_coordinates().x(), p.get_real_coordinates().y()] for p in self.calibration_points],
            dtype=np.float32)

        # Re-run cv2.findHomography with RANSAC to refine the transformation matrix
        self.transformation_matrix, _ = cv2.findHomography(
            image_points, real_coords, method=cv2.RANSAC,
            ransacReprojThreshold=5.0, maxIters=iterations, confidence=0.99)
        if self.transformation_matrix is None:
            raise RuntimeError("Homography refinement failed.")
        self.inverse_transformation_matrix = np.linalg.inv(self.transformation_matrix)

        self.calibration_done = True
        self.main_window.interpolationAction.setEnabled(True)

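    # Note: with exactly 4 correspondences RANSAC has a single minimal sample
    # and cannot reject outliers, so this refinement effectively repeats the
    # direct estimate; it only adds robustness once more points are collected.
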
    def transform_points(self, data_points):
        """Transforms data points using the calibration matrix."""
        if not self.calibration_done:
            raise RuntimeError("Calibration is not complete. Please complete calibration before transforming points.")

        transformed_points = []
        for point in data_points:
            # cv2.perspectiveTransform expects an (N, 1, 2) float32 array of 2D points
            img_coords = np.array([[[point.get_image_coordinates().x(),
                                     point.get_image_coordinates().y()]]], dtype=np.float32)
            real_coords = cv2.perspectiveTransform(img_coords, self.transformation_matrix)
            point.set_real_coordinates(QPointF(real_coords[0][0][0], real_coords[0][0][1]))
            transformed_points.append(point)

        return transformed_points

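    # Equivalently, a single point can be mapped by hand with NumPy (a sketch;
    # `H` stands for the 3x3 transformation_matrix computed above):
    #
    #     xp, yp, w = H @ np.array([u, v, 1.0])
    #     x_real, y_real = xp / w, yp / w
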
    def inverse_transform_point(self, x, y):
        """Transforms real-world coordinates back to image coordinates using the inverse calibration matrix."""
        if self.inverse_transformation_matrix is None:
            raise ValueError("Inverse transformation matrix has not been calculated yet.")

        real_coords = np.array([[x, y]], dtype=np.float32).reshape(-1, 1, 2)
        img_coords = cv2.perspectiveTransform(real_coords, self.inverse_transformation_matrix)

        return QPointF(img_coords[0][0][0], img_coords[0][0][1])

    def image_to_real_coordinates(self, point):
        """Transforms image coordinates to real-world coordinates using the calibration matrix."""
        if not self.calibration_done:
            raise RuntimeError("Calibration is not complete. Please complete calibration before transforming points.")

        img_coords = np.array([[point.x(), point.y()]], dtype=np.float32).reshape(-1, 1, 2)
        real_coords = cv2.perspectiveTransform(img_coords, self.transformation_matrix)
        return QPointF(real_coords[0][0][0], real_coords[0][0][1])

    def advanced_corner_detection(self, image):
        """Detects corner candidates using contrast enhancement, edges, lines, and Shi-Tomasi corners."""
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        # Enhance contrast, then binarize with adaptive thresholding
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        gray = clahe.apply(gray)
        binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

        # Edge detection
        edges = cv2.Canny(binary, 50, 150, apertureSize=3)

        # Line detection using the probabilistic Hough transform
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=10)

        line_img = np.zeros_like(image)
        if lines is not None:
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(line_img, (x1, y1), (x2, y2), (255, 0, 0), 2)

        # Find line intersections
        intersections = self.find_intersections(lines)

        # Corner detection using the Shi-Tomasi algorithm on the edge image
        corners = cv2.goodFeaturesToTrack(edges, maxCorners=100, qualityLevel=0.01, minDistance=10)
        if corners is None:
            return []
        corners = np.float32(corners)

        # Refine corner locations to sub-pixel accuracy on the pre-threshold grayscale image
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.01)
        corners = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)

        # Only keep corners that are close to detected lines or intersections
        refined_corners = []
        for corner in corners:
            x, y = corner.ravel()
            if self.is_near_line(x, y, lines) or self.is_near_intersection(x, y, intersections):
                refined_corners.append(QPointF(x, y))
        self.main_window.image_view.draw_detected_corners(refined_corners)

        self.main_window.image_view.set_image(image)
        self.main_window.update_image()

        return refined_corners

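    # The detection parameters above (Canny 50/150, Hough threshold=50,
    # minLineLength=50, maxCorners=100, qualityLevel=0.01, and the 5 px
    # proximity thresholds below) are tuning knobs; suitable values depend on
    # image resolution and the contrast of the calibration target.
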
    def is_near_line(self, x, y, lines, threshold=5):
        """Checks if a point (x, y) is near any of the detected lines."""
        if lines is not None:
            for line in lines:
                x1, y1, x2, y2 = line[0]
                distance = self.point_line_distance(x, y, x1, y1, x2, y2)
                if distance < threshold:
                    return True
        return False

    def point_line_distance(self, px, py, x1, y1, x2, y2):
        """Calculates the minimum distance from a point to a line segment."""
        line_mag = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        if line_mag < 1e-10:
            return np.sqrt((px - x1) ** 2 + (py - y1) ** 2)
        # Projection parameter of (px, py) onto the segment, clamped to [0, 1]
        # so the closest point cannot fall outside the segment endpoints
        u = ((px - x1) * (x2 - x1) + (py - y1) * (y2 - y1)) / line_mag ** 2
        u = max(0.0, min(1.0, u))
        ix = x1 + u * (x2 - x1)
        iy = y1 + u * (y2 - y1)
        return np.sqrt((px - ix) ** 2 + (py - iy) ** 2)

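    # Worked example: for the segment (0, 0)-(10, 0) and the point (5, 3),
    # u = 0.5, the closest segment point is (5, 0), and the distance is 3.0;
    # for the point (15, 0), u clamps to 1.0 and the distance is 5.0.
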
    def find_intersections(self, lines):
        """Finds pairwise intersections between detected lines."""
        intersections = []
        if lines is not None:
            for i in range(len(lines)):
                for j in range(i + 1, len(lines)):
                    x1, y1, x2, y2 = lines[i][0]
                    x3, y3, x4, y4 = lines[j][0]
                    # Intersection of the two infinite lines through the segments
                    # (standard determinant formula); parallel lines give denom == 0
                    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
                    if denom != 0:
                        intersect_x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / denom
                        intersect_y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / denom
                        intersections.append((intersect_x, intersect_y))
        return intersections

    def is_near_intersection(self, x, y, intersections, threshold=5):
        """Checks if a point (x, y) is near any of the detected intersections."""
        for intersect_x, intersect_y in intersections:
            distance = np.sqrt((x - intersect_x) ** 2 + (y - intersect_y) ** 2)
            if distance < threshold:
                return True
        return False

    def automatic_calibration(self, image):
        """Automatically calibrates the image using enhanced corner detection."""
        print("Starting automatic calibration...")
        self.automatic_calibration_mode = True
        self.calibration_cancelled = False  # Reset cancellation flag
        corners = self.advanced_corner_detection(image)

        if len(corners) >= 10:  # Require a reasonable pool of candidates
            corners = random.sample(corners, 4)  # Pick 4 corners at random
            self.calibration_points = []
            for corner in corners:
                if not self.calibration_cancelled:
                    self.add_calibration_point(corner, automatic=True)
            if len(self.calibration_points) == 4:
                self.calculate_transformation_matrix()
            else:
                self.clear_calibration_points()
        else:
            print("Not enough corners detected for calibration.")
        self.automatic_calibration_mode = False

    def select_random_point(self):
        """Selects a random point from detected corners for automatic calibration."""
        corners = self.advanced_corner_detection(self.main_window.image_processor.image)
        if corners:
            return random.choice(corners)
        return None
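
# A minimal usage sketch (hypothetical: assumes a Qt main window exposing the
# `image_view`, `image_processor`, `update_image`, and `interpolationAction`
# members the methods above rely on):
#
#     calibration = Calibration(main_window)
#     calibration.automatic_calibration(main_window.image_processor.image)
#     if calibration.calibration_done:
#         real = calibration.image_to_real_coordinates(QPointF(120.0, 80.0))
#         back = calibration.inverse_transform_point(real.x(), real.y())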