Commit 4defd08

arch committed
Major improvement of projection tracking
1 parent 06e7061 commit 4defd08

5 files changed (+366, -28 lines)


funscript_editor/algorithms/equirectangular.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ class Equirectangular:
         PHI (int) up/down angle in degree (up direction positive, down direction negative)
         height (int): output image height
         width (int): output image width
-        RADIUS (int, optional): sphere radius
+        queue_size (int): size of frame buffer
     """
 
     def __init__(self,
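
Both the old Equirectangular helper and the new EquirectangularVideoStream expose a get_perspective(image, FOV, THETA, PHI, height, width) call (visible in the next file's diff) that cuts a flat pinhole view out of a 360° equirectangular frame using the angles documented above. For orientation only, here is a minimal standalone sketch of that kind of reprojection; the function name, rotation sign conventions and interpolation choices are assumptions for illustration, not the project's implementation.

    import cv2
    import numpy as np

    def equirect_to_perspective(img, fov_deg, theta_deg, phi_deg, out_h, out_w):
        """Sample a flat pinhole view from an equirectangular panorama.

        theta_deg is the left/right (yaw) angle and phi_deg the up/down (pitch)
        angle, mirroring the THETA/PHI parameters documented above.
        """
        in_h, in_w = img.shape[:2]

        # Focal length of the virtual camera, derived from the horizontal FOV.
        f = 0.5 * out_w / np.tan(0.5 * np.radians(fov_deg))

        # One viewing ray per output pixel, camera looking down +z.
        xs, ys = np.meshgrid(np.arange(out_w) - 0.5 * out_w,
                             np.arange(out_h) - 0.5 * out_h)
        rays = np.stack([xs, ys, np.full_like(xs, f)], axis=-1)
        rays /= np.linalg.norm(rays, axis=-1, keepdims=True)

        # Rotate the rays by yaw (THETA) then pitch (PHI); the signs are an assumption.
        r_yaw, _ = cv2.Rodrigues(np.array([0.0, np.radians(theta_deg), 0.0]))
        r_pitch, _ = cv2.Rodrigues(np.array([np.radians(-phi_deg), 0.0, 0.0]))
        rays = rays @ (r_yaw @ r_pitch).T

        # Rays -> spherical coordinates -> source pixel positions in the panorama.
        lon = np.arctan2(rays[..., 0], rays[..., 2])        # [-pi, pi]
        lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))   # [-pi/2, pi/2]
        map_x = (((lon / np.pi + 1.0) * 0.5 * in_w) % in_w).astype(np.float32)
        map_y = np.clip((lat / (0.5 * np.pi) + 1.0) * 0.5 * in_h, 0, in_h - 1).astype(np.float32)

        return cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)

Called with the values kept in perspective_params (FOV, THETA, PHI, height, width), this produces the same kind of flattened preview that the ROI selection in the next file operates on.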

funscript_editor/algorithms/funscriptgenerator.py

Lines changed: 34 additions & 26 deletions
@@ -18,7 +18,7 @@
 from funscript_editor.definitions import VIDEO_SCALING_CONFIG_FILE
 from funscript_editor.utils.config import HYPERPARAMETER, SETTINGS
 from datetime import datetime
-from funscript_editor.algorithms.equirectangular import Equirectangular
+from funscript_editor.data.equirectangularvideostream import EquirectangularVideoStream
 
 import funscript_editor.algorithms.signalprocessing as sp
 import numpy as np
@@ -426,7 +426,7 @@ def get_perspective_roi(self, image :np.ndarray) -> None:
 
         selected = False
         while not selected:
-            preview = Equirectangular.get_perspective(
+            preview = EquirectangularVideoStream.get_perspective(
                 image,
                 perspective['FOV'],
                 perspective['THETA'],
@@ -437,7 +437,7 @@ def get_perspective_roi(self, image :np.ndarray) -> None:
 
             preview = self.drawText(preview, "Press 'q' to use current selected region of interest)",
                     y = 50, color = (255, 0, 0))
-            preview = self.drawText(preview, "Use 'w', 'a', 's', 'd' to move the region of interest",
+            preview = self.drawText(preview, "Use 'w', 's' to move up/down to the region of interest",
                     y = 75, color = (0, 255, 0))
 
             cv2.imshow(self.window_name, preview)
@@ -446,13 +446,13 @@ def get_perspective_roi(self, image :np.ndarray) -> None:
             if pressed_key == "'q'":
                 selected = True
             elif pressed_key == "'w'":
-                perspective['PHI'] += 5
+                perspective['PHI'] = min((80, perspective['PHI'] + 5))
             elif pressed_key == "'s'":
-                perspective['PHI'] -= 5
-            elif pressed_key == "'a'":
-                perspective['THETA'] -= 5
-            elif pressed_key == "'d'":
-                perspective['THETA'] += 5
+                perspective['PHI'] = max((-80, perspective['PHI'] - 5))
+            # elif pressed_key == "'a'":
+            #     perspective['THETA'] -= 5
+            # elif pressed_key == "'d'":
+            #     perspective['THETA'] += 5
 
             if cv2.waitKey(1) in [ord('q')]:
                 selected = True
@@ -503,40 +503,48 @@ def get_bbox(self, image: np.ndarray, txt: str) -> tuple:
         return bbox
 
 
+    def get_first_frame(self) -> np.ndarray:
+        """ Get the first frame image
+
+        Returns:
+            np.ndarray: opencv image
+        """
+        cap = cv2.VideoCapture(str(self.params.video_path))
+        if self.params.start_frame > 0: self.stream.set(cv2.CAP_PROP_POS_FRAMES, self.params.start_frame)
+        success, first_frame = cap.read()
+        cap.release()
+        return first_frame
+
+
     def tracking(self) -> str:
         """ Tracking function to track the features in the video
 
         Returns:
             str: a process status message e.g. 'end of video reached'
         """
-        video = FileVideoStream(
-            video_path=self.params.video_path,
-            scale_determiner=self.get_scaling,
-            start_frame=self.params.start_frame)
-
-        first_frame = video.read()
+        first_frame = self.get_first_frame()
         if first_frame is None:
             return 'Video file is corrupt'
 
         if self.params.use_equirectangular:
            self.get_perspective_roi(first_frame)
-            first_frame = Equirectangular.get_perspective(
-                first_frame,
-                self.perspective_params['FOV'],
-                self.perspective_params['THETA'],
-                self.perspective_params['PHI'],
-                self.perspective_params['height'],
-                self.perspective_params['width']
-            )
-            video = Equirectangular(
-                video,
+
+            video = EquirectangularVideoStream(
+                self.params.video_path,
                 self.perspective_params['FOV'],
                 self.perspective_params['THETA'],
                 self.perspective_params['PHI'],
                 self.perspective_params['height'],
-                self.perspective_params['width']
+                self.perspective_params['width'],
+                self.params.start_frame
             )
+        else:
+            video = FileVideoStream(
+                video_path=self.params.video_path,
+                scale_determiner=self.get_scaling,
+                start_frame=self.params.start_frame)
 
+        first_frame = video.read()
         bboxWoman = self.get_bbox(first_frame, "Select Woman Feature")
         trackerWoman = StaticVideoTracker(first_frame, bboxWoman, limit_searchspace = not self.params.use_equirectangular)
         self.bboxes['Woman'].append(bboxWoman)
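
The restructured tracking() no longer creates a FileVideoStream just to peek at the first frame: the new get_first_frame() grabs it directly with OpenCV, and only afterwards is either an EquirectangularVideoStream (reprojected frames) or a plain FileVideoStream constructed, depending on use_equirectangular. A minimal standalone sketch of that single-frame grab, using only cv2 (the file name and start frame below are placeholder values):

    import cv2

    def grab_frame(video_path: str, start_frame: int = 0):
        """Read one frame from video_path, or return None if it cannot be decoded."""
        cap = cv2.VideoCapture(str(video_path))
        if start_frame > 0:
            # Seek before reading so the preview matches the configured start position.
            cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        success, frame = cap.read()
        cap.release()
        return frame if success else None

    first_frame = grab_frame("example_vr_clip.mp4", start_frame=120)  # hypothetical input
    if first_frame is None:
        print("Video file is corrupt")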

funscript_editor/config/settings.yaml

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ zoom_factor: 4.0
 
 # Convert video in normal perspective view before apply tracking. This should improve the
 # tracking at the border of videos, because there is the distortion very high.
-use_equirectangular: False
+use_equirectangular: True
 
 # Scaling for the equirectangular preview window
 equirectangular_scaling: 1.0
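
With the default flipped to True, every tracked frame is first converted to a normal perspective view; this helps because equirectangular frames are most distorted near their borders, which is exactly where trackers tend to drift. A small sketch of how these keys could end up as Python values (the project actually imports them via SETTINGS from funscript_editor.utils.config; the PyYAML loader shown here is only an assumption about how that might work):

    import yaml  # PyYAML; assumed loader, the project may read its config differently

    with open("funscript_editor/config/settings.yaml") as f:
        settings = yaml.safe_load(f)

    # After this commit the default is True, so tracking runs on reprojected frames
    # unless the user switches the flag back off.
    use_equirectangular = bool(settings.get("use_equirectangular", True))
    preview_scale = float(settings.get("equirectangular_scaling", 1.0))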
