Skip to content

Commit ea0010e

Browse files
author
arch
committed
store tracked points in class
1 parent 64b007d commit ea0010e

File tree

3 files changed

+89
-16
lines changed

3 files changed

+89
-16
lines changed

funscript_editor/algorithms/funscriptgenerator.py

Lines changed: 61 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ def __init__(self,
9898
self.params = params
9999
self.funscripts = funscripts
100100
self.video_info = FFmpegStream.get_video_info(self.params.video_path)
101+
self.tracking_points = {}
101102
self.score = {
102103
'x': [],
103104
'y': [],
@@ -252,17 +253,9 @@ def scale_score(self, status: str, metric : str = 'y') -> None:
252253
min_frame = np.argmin(np.array(self.score[metric])) + self.params.start_frame
253254
max_frame = np.argmax(np.array(self.score[metric])) + self.params.start_frame
254255

255-
if False:
256-
cap = cv2.VideoCapture(self.params.video_path)
257-
cap.set(cv2.CAP_PROP_POS_FRAMES, min_frame)
258-
success_min, imgMin = cap.read()
259-
cap.set(cv2.CAP_PROP_POS_FRAMES, max_frame)
260-
success_max, imgMax = cap.read()
261-
cap.release()
262-
else:
263-
success_min, success_max = True, True
264-
imgMin = FFmpegStream.get_frame(self.params.video_path, min_frame)
265-
imgMax = FFmpegStream.get_frame(self.params.video_path, max_frame)
256+
success_min, success_max = True, True
257+
imgMin = FFmpegStream.get_frame(self.params.video_path, min_frame)
258+
imgMax = FFmpegStream.get_frame(self.params.video_path, max_frame)
266259

267260
if success_min and success_max:
268261
if self.is_vr_video():
@@ -283,6 +276,20 @@ def scale_score(self, status: str, metric : str = 'y') -> None:
283276
imgMin = cv2.resize(imgMin, None, fx=scale, fy=scale)
284277
imgMax = cv2.resize(imgMax, None, fx=scale, fy=scale)
285278

279+
min_tracking_points = self.get_tracking_points_by_frame_number(min_frame - self.params.start_frame)
280+
max_tracking_points = self.get_tracking_points_by_frame_number(max_frame - self.params.start_frame)
281+
282+
# TODO: draw points to image
283+
# NOTE: Code below does not work because the images do not use the same projection
284+
285+
# for points in min_tracking_points:
286+
# imgMin = OpenCV_GUI.draw_point_to_image(imgMin, points)
287+
288+
# for points in max_tracking_points:
289+
# imgMax = OpenCV_GUI.draw_point_to_image(imgMax, points)
290+
291+
# print('min_tracking_points', min_tracking_points, 'max_tracking_points', max_tracking_points)
292+
286293
(desired_min, desired_max) = self.ui.min_max_selector(
287294
image_min = imgMin,
288295
image_max = imgMax,
@@ -654,10 +661,53 @@ def tracking(self) -> str:
654661
self.logger.info(status)
655662
self.logger.info('Interpolate tracking boxes')
656663
interpolated_bboxes = self.interpolate_bboxes(bboxes)
664+
self.tracking_points = self.determine_tracking_points(interpolated_bboxes)
657665
self.calculate_score(interpolated_bboxes)
658666
return status
659667

660668

669+
def determine_tracking_points(self, interpolated_bboxes: dict) -> dict:
    """ Determine the final tracking points

    Reduces each interpolated tracking box to its center point so the
    tracked motion can be drawn/inspected later.

    Args:
        interpolated_bboxes (dict): interpolated bboxes from all trackers,
            keyed by tracker type, then tracker number, each a list of
            (x, y, w, h) boxes — one per frame

    Returns:
        dict: final tracking points with the same tracker-type/tracker-number
            nesting, each value a list of center points
    """
    # Same nesting as the input, with every bbox collapsed to its center.
    return {
        tracker_type: {
            tracker_number: [self.get_center(bbox) for bbox in bboxes]
            for tracker_number, bboxes in trackers.items()
        }
        for tracker_type, trackers in interpolated_bboxes.items()
    }
685+
686+
687+
def get_tracking_points_by_frame_number(self, relative_frame_number: int) -> list:
    """ Get tracking points by frame number

    Collects the tracked point of every tracker for the given frame. Each
    tracker number keeps its own slot in the result; a tracker whose data
    does not cover the frame leaves its slot empty. NOTE(review): trackers
    of different types sharing the same enumeration index share a slot —
    this mirrors the original lookup; confirm whether that is intended.

    Args:
        relative_frame_number (int): frame number relative to the tracking start

    Returns:
        list: one list of tracking points per tracker slot
    """
    result = []
    for tracker_type in self.tracking_points:
        for idx, points in enumerate(self.tracking_points[tracker_type].values()):
            # Grow the result so this tracker index always has a slot,
            # even when no point exists for the requested frame.
            if len(result) <= idx:
                result.append([])
            # Only frames inside the tracked range contribute a point.
            if relative_frame_number < len(points):
                result[idx].append(points[relative_frame_number])

    return result
709+
710+
661711
def finished(self, status: str, success :bool) -> None:
662712
""" Process necessary steps to complete the predicted funscript
663713

funscript_editor/config/settings.yaml

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,6 @@ tracker: 'CSRT'
2020
# Specify the wav file in the config directory to play when tracking finished (write 'off' to disable the sound notification)
2121
notification_sound: 'off'
2222

23-
# Time in milliseconds at which the tracking is stopped if the selected feature is not found
24-
# THIS feature has no effect since v0.1.2!
25-
tracking_lost_time: 0
26-
2723
# Specify the scene detector algorithm. Tracking pause at detected scene changes. Available options:
2824
# - 'CSV': load scene csv file from [PySceneDetect](https://github.com/Breakthrough/PySceneDetect) if available
2925
# - 'CONTENT': Detects cuts using changes in colour and intensity between frames.

funscript_editor/ui/opencvui.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ def draw_box_to_image(image: np.ndarray, bbox, color: tuple = (255, 0, 255)) ->
228228
229229
Args:
230230
image (np.ndarray): image
231-
bbox (tuple): tracking box with (x,y,w,h)
231+
bbox (tuple or list): tracking box with (x,y,w,h) or list of tracking boxes
232232
color (tuple): RGB color values for the box
233233
234234
Returns:
@@ -259,6 +259,33 @@ def draw_box_to_image(image: np.ndarray, bbox, color: tuple = (255, 0, 255)) ->
259259
return image
260260

261261

262+
@staticmethod
def draw_point_to_image(image: np.ndarray, point, color: tuple = (255, 0, 255)) -> np.ndarray:
    """ Draw a point to given image

    Args:
        image (np.ndarray): image
        point (tuple or list): point (x,y) or list of points
        color (tuple): RGB color values for the point

    Returns:
        np.ndarray: opencv image
    """
    if not isinstance(point, list):
        point = [point]

    for p in point:
        # cv2.circle requires integer pixel coordinates; interpolated
        # tracking points may carry float values, so round them here.
        cv2.circle(
            image,
            (int(round(p[0])), int(round(p[1]))),
            4,
            color,
            2
        )

    return image
287+
288+
262289
def get_preview_fps(self) -> float:
263290
""" Get current processing FPS
264291

0 commit comments

Comments
 (0)