Skip to content

Commit c3e0450

Browse files
author
arch
committed
add supervised tracking option
1 parent 356737f commit c3e0450

File tree

4 files changed

+71
-37
lines changed

4 files changed

+71
-37
lines changed

funscript_editor/algorithms/funscriptgenerator.py

Lines changed: 48 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ class FunscriptGeneratorParameter:
3737
track_men: bool
3838
metric: str
3939
projection: str
40+
supervised_tracking: bool = True
4041

4142
# Settings
4243
start_frame: int = 0 # default is video start (input: set current video position)
@@ -131,22 +132,23 @@ def determine_preview_scaling(self, frame_width, frame_height) -> None:
131132
self.params.preview_scaling = float(SETTINGS['preview_scaling']) * max(scale)
132133

133134

134-
def drawBox(self, img: np.ndarray, bbox: tuple) -> np.ndarray:
135+
def draw_box(self, img: np.ndarray, bbox: tuple, color: tuple = (255, 0, 255)) -> np.ndarray:
135136
""" Draw a tracking box on the image/frame
136137
137138
Args:
138139
img (np.ndarray): opencv image
139140
bbox (tuple): tracking box with (x,y,w,h)
141+
color (tuple): RGB color values for the box
140142
141143
Returns:
142144
np.ndarray: opencv image with annotated tracking box
143145
"""
144146
annotated_img = img.copy()
145-
cv2.rectangle(annotated_img, (bbox[0], bbox[1]), ((bbox[0]+bbox[2]), (bbox[1]+bbox[3])), (255, 0, 255), 3, 1)
147+
cv2.rectangle(annotated_img, (bbox[0], bbox[1]), ((bbox[0]+bbox[2]), (bbox[1]+bbox[3])), color, 3, 1)
146148
return annotated_img
147149

148150

149-
def drawFPS(self, img: np.ndarray) -> np.ndarray:
151+
def draw_fps(self, img: np.ndarray) -> np.ndarray:
150152
""" Draw processing FPS on the image/frame
151153
152154
Args:
@@ -164,7 +166,7 @@ def drawFPS(self, img: np.ndarray) -> np.ndarray:
164166
return annotated_img
165167

166168

167-
def drawTime(self, img: np.ndarray, frame_num: int) -> np.ndarray:
169+
def draw_time(self, img: np.ndarray, frame_num: int) -> np.ndarray:
168170
""" Draw Time on the image/frame
169171
170172
Args:
@@ -191,7 +193,7 @@ def drawTime(self, img: np.ndarray, frame_num: int) -> np.ndarray:
191193
return annotated_img
192194

193195

194-
def drawText(self, img: np.ndarray, txt: str, y :int = 50, color :tuple = (0,0,255)) -> np.ndarray:
196+
def draw_text(self, img: np.ndarray, txt: str, y :int = 50, color :tuple = (0,0,255)) -> np.ndarray:
195197
""" Draw text to an image/frame
196198
197199
Args:
@@ -525,9 +527,9 @@ def get_vr_projection_config(self, image :np.ndarray) -> dict:
525527
parameter_changed = False
526528
preview = FFmpegStream.get_projection(image, config)
527529

528-
preview = self.drawText(preview, "Press 'q' to use current selected region of interest)",
530+
preview = self.draw_text(preview, "Press 'q' to use current selected region of interest)",
529531
y = 50, color = (255, 0, 0))
530-
preview = self.drawText(preview, "VR Projection: Use 'w', 's' to move up/down to the region of interest",
532+
preview = self.draw_text(preview, "VR Projection: Use 'w', 's' to move up/down to the region of interest",
531533
y = 75, color = (0, 255, 0))
532534

533535
cv2.imshow(self.window_name, self.preview_scaling(preview))
@@ -559,7 +561,7 @@ def __show_loading_screen(self, shape: tuple, txt: str = "Please wait...") -> No
559561
"""
560562
try:
561563
background = np.full(shape, 0, dtype=np.uint8)
562-
loading_screen = self.drawText(background, txt)
564+
loading_screen = self.draw_text(background, txt)
563565
cv2.imshow(self.window_name, self.preview_scaling(loading_screen))
564566
cv2.waitKey(1)
565567
except: pass
@@ -575,12 +577,12 @@ def get_bbox(self, image: np.ndarray, txt: str) -> tuple:
575577
Returns:
576578
tuple: the entered box tuple (x,y,w,h)
577579
"""
578-
image = self.drawText(image, "Select area with Mouse and Press 'space' or 'enter' to continue",
580+
image = self.draw_text(image, "Select area with Mouse and Press 'space' or 'enter' to continue",
579581
y = 75, color = (255, 0, 0))
580582

581583
if self.params.use_zoom:
582584
while True:
583-
zoom_bbox = cv2.selectROI(self.window_name, self.drawText(image, "Zoom selected area"), False)
585+
zoom_bbox = cv2.selectROI(self.window_name, self.draw_text(image, "Zoom selected area"), False)
584586
if zoom_bbox is None or len(zoom_bbox) == 0: continue
585587
if zoom_bbox[2] < 75 or zoom_bbox[3] < 75:
586588
self.logger.error("The selected zoom area is too small")
@@ -590,7 +592,7 @@ def get_bbox(self, image: np.ndarray, txt: str) -> tuple:
590592
image = image[zoom_bbox[1]:zoom_bbox[1]+zoom_bbox[3], zoom_bbox[0]:zoom_bbox[0]+zoom_bbox[2]]
591593
image = cv2.resize(image, None, fx=self.params.zoom_factor, fy=self.params.zoom_factor)
592594

593-
image = self.drawText(image, txt)
595+
image = self.draw_text(image, txt)
594596
image = self.preview_scaling(image)
595597
while True:
596598
bbox = cv2.selectROI(self.window_name, image, False)
@@ -666,14 +668,25 @@ def tracking(self) -> str:
666668
}
667669

668670
first_frame = video.read()
669-
bboxWoman = self.get_bbox(first_frame, "Select Woman Feature")
670-
trackerWoman = StaticVideoTracker(first_frame, bboxWoman)
671-
bboxes['Woman'][1] = bboxWoman
671+
bbox_woman = self.get_bbox(first_frame, "Select Woman Feature")
672+
preview_frame = self.draw_box(first_frame, bbox_woman, color=(255,0,255))
673+
if self.params.supervised_tracking:
674+
tracking_area_woman = self.get_bbox(preview_frame, "Select the Supervised Tracking Area for the Woman Feature")
675+
preview_frame = self.draw_box(preview_frame, tracking_area_woman, color=(0,255,0))
676+
tracker_woman = StaticVideoTracker(first_frame, bbox_woman, supervised_tracking_area = tracking_area_woman)
677+
else:
678+
tracker_woman = StaticVideoTracker(first_frame, bbox_woman)
679+
bboxes['Woman'][1] = bbox_woman
672680

673681
if self.params.track_men:
674-
bboxMen = self.get_bbox(self.drawBox(first_frame, bboxWoman), "Select Men Feature")
675-
trackerMen = StaticVideoTracker(first_frame, bboxMen)
676-
bboxes['Men'][1] = bboxMen
682+
bbox_men = self.get_bbox(preview_frame, "Select Men Feature")
683+
preview_frame = self.draw_box(preview_frame, bbox_men, color=(255,0,255))
684+
if self.params.supervised_tracking:
685+
tracking_area_men = self.get_bbox(preview_frame, "Select the Supervised Tracking Area for the Men Feature")
686+
tracker_men = StaticVideoTracker(first_frame, bbox_men, supervised_tracking_area = tracking_area_men)
687+
else:
688+
tracker_men = StaticVideoTracker(first_frame, bbox_men)
689+
bboxes['Men'][1] = bbox_men
677690

678691
if self.params.max_playback_fps > (self.params.skip_frames+1):
679692
cycle_time_in_ms = (float(1000) / float(self.params.max_playback_fps)) * (self.params.skip_frames+1)
@@ -700,40 +713,44 @@ def tracking(self) -> str:
700713
status = "Tracking stop at existing action point"
701714
break
702715

703-
trackerWoman.update(frame)
704-
if self.params.track_men: trackerMen.update(frame)
716+
tracker_woman.update(frame)
717+
if self.params.track_men: tracker_men.update(frame)
705718

706719
if last_frame is not None:
707720
# Process data from last step while the next tracking points get predicted.
708721
# This should improve the whole processing speed, because the tracker runs in a separate thread
709-
bboxes['Woman'][frame_num-1] = bboxWoman
710-
last_frame = self.drawBox(last_frame, bboxes['Woman'][frame_num-1])
722+
bboxes['Woman'][frame_num-1] = bbox_woman
723+
last_frame = self.draw_box(last_frame, bboxes['Woman'][frame_num-1])
724+
if self.params.supervised_tracking:
725+
last_frame = self.draw_box(last_frame, tracking_area_woman, color=(0,255,0))
711726

712727
if self.params.track_men:
713-
bboxes['Men'][frame_num-1] = bboxMen
714-
last_frame = self.drawBox(last_frame, bboxes['Men'][frame_num-1])
728+
bboxes['Men'][frame_num-1] = bbox_men
729+
last_frame = self.draw_box(last_frame, bboxes['Men'][frame_num-1])
730+
if self.params.supervised_tracking:
731+
last_frame = self.draw_box(last_frame, tracking_area_men, color=(0,255,0))
715732

716-
last_frame = self.drawFPS(last_frame)
733+
last_frame = self.draw_fps(last_frame)
717734
cv2.putText(last_frame, "Press 'q' if the tracking point shifts or a video cut occurred",
718735
(self.x_text_start, 75), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
719-
last_frame = self.drawTime(last_frame, frame_num + self.params.start_frame)
736+
last_frame = self.draw_time(last_frame, frame_num + self.params.start_frame)
720737
cv2.imshow(self.window_name, self.preview_scaling(last_frame))
721738

722739
if self.was_key_pressed('q') or cv2.waitKey(1) == ord('q'):
723740
status = 'Tracking stopped by user'
724741
bboxes = self.delete_last_tracking_predictions(bboxes, int((self.get_average_tracking_fps()+1)*2.0))
725742
break
726743

727-
(successWoman, bboxWoman) = trackerWoman.result()
728-
if not successWoman:
729-
status = 'Tracker Woman Lost'
744+
(woman_tracker_status, bbox_woman) = tracker_woman.result()
745+
if woman_tracker_status != "OK":
746+
status = 'Woman ' + woman_tracker_status
730747
bboxes = self.delete_last_tracking_predictions(bboxes, (self.params.skip_frames+1)*3)
731748
break
732749

733750
if self.params.track_men:
734-
(successMen, bboxMen) = trackerMen.result()
735-
if not successMen:
736-
status = 'Tracking Men Lost'
751+
(men_tracker_status, bbox_men) = tracker_men.result()
752+
if men_tracker_status != "OK":
753+
status = 'Men ' + men_tracker_status
737754
bboxes = self.delete_last_tracking_predictions(bboxes, (self.params.skip_frames+1)*3)
738755
break
739756

funscript_editor/algorithms/videotracker.py

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
""" Video Tracker """
22

3+
from os import supports_effective_ids
34
import cv2
45
import time
56
import logging
@@ -26,11 +27,13 @@ class StaticVideoTracker:
2627
def __init__(self,
2728
first_frame: np.ndarray,
2829
tracking_bbox: tuple,
29-
limit_searchspace : dict = {'h': 0.45, 'w':0.2},
30+
limit_searchspace : dict = {'h': 0.45, 'w':0.4},
31+
supervised_tracking_area: tuple = None,
3032
queue_size : int = 2):
3133
self.first_frame = first_frame
3234
self.limit_searchspace = limit_searchspace
3335
self.first_tracking_bbox = tracking_bbox
36+
self.supervised_tracking_area = supervised_tracking_area
3437
self.stopped = False
3538
self.sleep_time = 0.001
3639
self.queue_in = Queue(maxsize=queue_size)
@@ -110,6 +113,19 @@ def run(self) -> None:
110113
frame = self.queue_in.get()
111114
frame_roi = frame[y0:y1, x0:x1]
112115
success, bbox = self.tracker.update(frame_roi)
113-
if success: bbox = (int(bbox[0] + x0), int(bbox[1] + y0), int(bbox[2]), int(bbox[3]))
114-
self.queue_out.put((success, bbox))
116+
status = "Tracking Lost"
117+
if success:
118+
status = "OK"
119+
bbox = (int(bbox[0] + x0), int(bbox[1] + y0), int(bbox[2]), int(bbox[3]))
120+
if self.supervised_tracking_area is not None:
121+
if bbox[0] < self.supervised_tracking_area[0]:
122+
status = "Feature outside the specified area"
123+
elif bbox[1] < self.supervised_tracking_area[1]:
124+
status = "Feature outside the specified area"
125+
elif bbox[0] + bbox[2] > self.supervised_tracking_area[0] + self.supervised_tracking_area[2]:
126+
status = "Feature outside the specified area"
127+
elif bbox[1] + bbox[3] > self.supervised_tracking_area[1] + self.supervised_tracking_area[3]:
128+
status = "Feature outside the specified area"
129+
130+
self.queue_out.put((status, bbox))
115131

funscript_editor/ui/settings_dialog.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ def __setup_combo_boxes(self):
4848
for key in PROJECTION.keys() \
4949
if 'vr' not in key.lower() or self.include_vr])
5050
self.ui.trackingMetricComboBox.addItems(['y (up-down)', 'x (left-right)', 'euclideanDistance', 'roll (rotation)'])
51+
# self.ui.trackingMethodComboBox.addItems(['Unsupervised Woman', 'Unsupervised Woman + Men', 'Supervised Woman', 'Supervised Woman + Men'])
5152
self.ui.trackingMethodComboBox.addItems(['Woman', 'Woman + Men'])
5253

5354
def __set_tracking_metric(self, value):

linuxdeploy-plugin-conda.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ export CONDA_ALWAYS_YES="true"
9898
if [ -d "$APPDIR"/usr/conda ]; then
9999
log "WARNING: conda prefix directory exists: $APPDIR/usr/conda"
100100
log "Please make sure you perform a clean build before releases to make sure your process works properly."
101-
101+
102102
# activate environment
103103
. "$APPDIR"/usr/conda/bin/activate
104104
else
@@ -164,8 +164,8 @@ done
164164
# specific files in usr/conda/bin/ (regex could result in false replacements in other files)
165165
[ -f usr/conda/bin/python3-config ] && sed -i --follow-symlinks "s|$APPDIR_FULL|\${APPDIR}|g" usr/conda/bin/python3-config
166166
[ -f usr/conda/bin/ncursesw6-config ] && sed -i --follow-symlinks "s|$APPDIR_FULL|\${APPDIR}|g" usr/conda/bin/ncursesw6-config
167-
[ -f usr/conda/etc/fonts/fonts.conf ] && sed -i --follow-symlinks "s|<cachedir>$APPDIR_FULL|<cachedir>/tmp|g" usr/conda/etc/fonts/fonts.conf
168-
[ -f usr/conda/etc/fonts/fonts.conf ] && sed -i --follow-symlinks "s|<dir>$APPDIR_FULL|<dir>|g" usr/conda/etc/fonts/fonts.conf
167+
[ -f usr/conda/etc/fonts/fonts.conf ] && sed -i --follow-symlinks "s|<cachedir>$APPDIR_FULL|<cachedir>/tmp|g" usr/conda/etc/fonts/fonts.conf
168+
[ -f usr/conda/etc/fonts/fonts.conf ] && sed -i --follow-symlinks "s|<dir>$APPDIR_FULL|<dir>|g" usr/conda/etc/fonts/fonts.conf
169169

170170
popd
171171

0 commit comments

Comments
 (0)