
Commit 0ecece3

Author: arch
Commit message: reimplement scaling part 3
Parent: 3961a18

File tree: 3 files changed (+45, -33 lines)


funscript_editor/algorithms/funscriptgenerator.py

Lines changed: 37 additions & 27 deletions
@@ -66,20 +66,18 @@ def __init__(self,
         self.window_name = "Funscript Generator ({})".format(datetime.now().strftime("%H:%M:%S"))
 
         self.keypress_queue = Queue(maxsize=32)
-        self.stopped = False
-        self.perspective_params = {}
         self.x_text_start = 50
+        self.font_size = 0.6
+        self.tracking_fps = []
         self.scone_x = []
         self.scone_y = []
         self.bboxes = {
                 'Men': [],
                 'Woman': []
                 }
 
-        cap = cv2.VideoCapture(self.params.video_path)
-        self.fps = cap.get(cv2.CAP_PROP_FPS)
-        self.tracking_fps = []
-        cap.release()
+        self.video_info = FFmpegStream.get_video_info(self.params.video_path)
+
 
     #: completed event with reference to the funscript with the predicted actions, status message and success flag
     funscriptCompleted = QtCore.pyqtSignal(object, str, bool)
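
Note: with this hunk the constructor no longer probes the frame rate through a throwaway cv2.VideoCapture; it keeps a video_info object instead, and later code reads self.video_info.fps. FFmpegStream.get_video_info itself is not part of this diff, so the following is only a minimal stand-in sketch, assuming the returned object needs to expose fps, width, height and frame count:

from collections import namedtuple
import cv2

VideoInfo = namedtuple('VideoInfo', ['fps', 'width', 'height', 'length'])

def get_video_info(video_path: str) -> VideoInfo:
    # Stand-in for FFmpegStream.get_video_info: probe the file once and
    # return the metadata the generator needs (fps drives the timestamps).
    cap = cv2.VideoCapture(video_path)
    info = VideoInfo(
        fps=cap.get(cv2.CAP_PROP_FPS),
        width=int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        height=int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        length=int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
    )
    cap.release()
    return info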
@@ -90,22 +88,25 @@ def __init__(self,
     logger = logging.getLogger(__name__)
 
 
-    def determine_monitor_scaling(self):
-        frame_width = PROJECTION[self.params.projection]['parameter']['width']
-        frame_height = PROJECTION[self.params.projection]['parameter']['height']
+    def determine_monitor_scaling(self, frame_width, frame_height) -> float:
+        """ Determine the scaling for current monitor setup
 
+        Args:
+            frame_width (int): target frame width
+            frame_height (int): target frame height
+        """
         scale = []
         try:
             for monitor in get_monitors():
-                scale.append( min((monitor.width / float(frame_width), monitor.height / float(frame_height) )) )
+                if monitor.width > monitor.height:
+                    scale.append( min((monitor.width / float(frame_width), monitor.height / float(frame_height) )) )
         except: pass
 
         if len(scale) == 0:
             self.logger.error("Monitor resolution info not found")
-            self.monitor_scale = 1.0
         else:
             # asume we use the largest monitor for scipting
-            self.mointor_scale = max(scale)
+            self.params.preview_scaling *= max(scale)
 
 
     def drawBox(self, img: np.ndarray, bbox: tuple) -> np.ndarray:
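
determine_monitor_scaling now takes the target frame size from the projection config, ignores portrait monitors, and multiplies the fit of the largest landscape monitor directly into self.params.preview_scaling (the annotated float is not returned, although later hunks assign the result to self.mointor_scale). A standalone sketch of the same idea, assuming get_monitors comes from the screeninfo package and keeping the 1.0 fallback of the removed code, but returning the factor instead of mutating the parameters:

from screeninfo import get_monitors  # assumed source of get_monitors

def determine_monitor_scaling(frame_width: int, frame_height: int) -> float:
    # How much the target frame must be scaled to fit each landscape monitor.
    scale = []
    try:
        for monitor in get_monitors():
            if monitor.width > monitor.height:
                scale.append(min(monitor.width / float(frame_width),
                                 monitor.height / float(frame_height)))
    except Exception:
        pass
    # Assume the largest monitor is used for scripting; fall back to 1.0.
    return max(scale) if scale else 1.0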
@@ -136,7 +137,7 @@ def drawFPS(self, img: np.ndarray) -> np.ndarray:
         fps = (self.params.skip_frames+1)*cv2.getTickFrequency()/(cv2.getTickCount()-self.timer)
         self.tracking_fps.append(fps)
         cv2.putText(annotated_img, str(int(fps)) + ' fps', (self.x_text_start, 50),
-                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)
+                cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (0,0,255), 2)
         self.timer = cv2.getTickCount()
         return annotated_img
 
@@ -154,7 +155,7 @@ def drawText(self, img: np.ndarray, txt: str, y :int = 50, color :tuple = (0,0,2
             np.ndarray: opencv image with text
         """
         annotated_img = img.copy()
-        cv2.putText(annotated_img, str(txt), (self.x_text_start, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
+        cv2.putText(annotated_img, str(txt), (self.x_text_start, y), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, color, 2)
         return annotated_img
 
 
@@ -212,17 +213,17 @@ def min_max_selector(self,
         image = np.concatenate((image_min, image_max), axis=1)
 
         if info != "":
-            cv2.putText(image, "Info: "+info, (self.x_text_start, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
+            cv2.putText(image, "Info: "+info, (self.x_text_start, 75), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
 
         if title_min != "":
-            cv2.putText(image, title_min, (self.x_text_start, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
+            cv2.putText(image, title_min, (self.x_text_start, 25), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
 
         if title_max != "":
             cv2.putText(image, title_max, (image_min.shape[1] + self.x_text_start, 25),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
+                    cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
 
         cv2.putText(image, "Use 'space' to quit and set the trackbar values",
-                (self.x_text_start, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
+                (self.x_text_start, 100), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
 
         self.clear_keypress_queue()
         trackbarValueMin = lower_limit
@@ -231,9 +232,9 @@ def min_max_selector(self,
             try:
                 preview = image.copy()
                 cv2.putText(preview, "Set {} to {}".format('Min', trackbarValueMin),
-                        (self.x_text_start, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)
+                        (self.x_text_start, 50), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (0,0,255), 2)
                 cv2.putText(preview, "Set {} to {}".format('Max', trackbarValueMax),
-                        (image_min.shape[1] + self.x_text_start, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)
+                        (image_min.shape[1] + self.x_text_start, 50), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (0,0,255), 2)
                 cv2.imshow(self.window_name, self.preview_scaling(preview))
                 if self.was_space_pressed() or cv2.waitKey(25) == ord(' '): break
                 trackbarValueMin = cv2.getTrackbarPos("Min", self.window_name)
@@ -411,7 +412,12 @@ def preview_scaling(self, preview_image :np.ndarray) -> np.ndarray:
         Returns:
             np.ndarray: scaled opencv image
         """
-        return cv2.resize(preview_image, None, fx=self.params.preview_scaling, fy=self.params.preview_scaling)
+        return cv2.resize(
+                preview_image,
+                None,
+                fx=self.params.preview_scaling,
+                fy=self.params.preview_scaling
+            )
 
 
     def get_vr_projection_config(self, image :np.ndarray) -> None:
@@ -422,6 +428,8 @@ def get_vr_projection_config(self, image :np.ndarray) -> None:
         """
         config = PROJECTION[self.params.projection]
 
+        self.mointor_scale = self.determine_monitor_scaling(config['parameter']['width'], config['parameter']['height'])
+
         # NOTE: improve processing speed to make this menu more responsive
         if image.shape[0] > 6000 or image.shape[1] > 6000:
             image = cv2.resize(image, None, fx=0.25, fy=0.25)
@@ -549,6 +557,8 @@ def get_flat_projection_config(self,
         scaling = config['parameter']['height'] / float(h)
         config['parameter']['width'] = round(w * scaling)
 
+        self.mointor_scale = self.determine_monitor_scaling(config['parameter']['width'], config['parameter']['height'])
+
         return config
 
 
@@ -622,7 +632,7 @@ def tracking(self) -> str:
 
             last_frame = self.drawFPS(last_frame)
             cv2.putText(last_frame, "Press 'q' if the tracking point shifts or a video cut occured",
-                    (self.x_text_start, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
+                    (self.x_text_start, 75), cv2.FONT_HERSHEY_SIMPLEX, self.font_size, (255,0,0), 2)
             cv2.imshow(self.window_name, self.preview_scaling(last_frame))
 
             if self.was_key_pressed('q') or cv2.waitKey(1) == ord('q'):
@@ -706,7 +716,6 @@ def finished(self, status: str, success :bool) -> None:
         """
         cv2.destroyWindow(self.window_name)
         self.funscriptCompleted.emit(self.funscript, status, success)
-        self.stopped = True
 
 
     def apply_shift(self, frame_number, position: str) -> int:
@@ -727,6 +736,7 @@ def apply_shift(self, frame_number, position: str) -> int:
 
         return self.params.start_frame + frame_number
 
+
     def get_score_with_offset(self, idx_dict) -> list:
         """ Apply the offsets form config file
 
@@ -766,9 +776,9 @@ def run(self) -> None:
             return
 
         if self.params.direction != 'x':
-            idx_dict = sp.get_local_max_and_min_idx(self.score_y, self.fps)
+            idx_dict = sp.get_local_max_and_min_idx(self.score_y, self.video_info.fps)
         else:
-            idx_dict = sp.get_local_max_and_min_idx(self.score_x, self.fps)
+            idx_dict = sp.get_local_max_and_min_idx(self.score_x, self.video_info.fps)
 
         idx_list = [x for k in ['min', 'max'] for x in idx_dict[k]]
         idx_list.sort()
@@ -784,15 +794,15 @@ def run(self) -> None:
                 min(output_score) \
                     if output_score[idx] < min(output_score) + self.params.bottom_threshold \
                     else round(output_score[idx]),
-                self.frame_to_millisec(self.apply_shift(idx, 'min'), self.fps)
+                self.frame_to_millisec(self.apply_shift(idx, 'min'), self.video_info.fps)
             )
 
         for idx in idx_dict['max']:
             self.funscript.add_action(
                 max(output_score) \
                     if output_score[idx] > max(output_score) - self.params.top_threshold \
                     else round(output_score[idx]),
-                self.frame_to_millisec(self.apply_shift(idx, 'max'), self.fps)
+                self.frame_to_millisec(self.apply_shift(idx, 'max'), self.video_info.fps)
             )
 
         self.finished(status, True)
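
run() now converts the detected local minima and maxima into funscript timestamps with self.video_info.fps instead of the removed self.fps. frame_to_millisec itself is not shown in this commit; a plausible, hypothetical sketch of the conversion it performs:

def frame_to_millisec(frame_number: int, fps: float) -> int:
    # Hypothetical helper body: map a frame index to a millisecond timestamp
    # using the probed frame rate.
    return int(round(frame_number * 1000.0 / fps))

# e.g. frame 300 at 29.97 fps -> round(300 * 1000 / 29.97) = 10010 ms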

funscript_editor/config/projection.yaml

Lines changed: 4 additions & 1 deletion
@@ -1,5 +1,7 @@
+# projection config
+# do not change any parameter here!
 
-# 3D Side-By-Side VR Video
+# 3D Side-By-Side 180 Dome VR Videos
 vr_he_sbs:
   video_filter: 'v360=input=he:in_stereo=sbs:pitch=${phi}:output=flat:d_fov=${fov}:w=${width}:h=${height}'
   parameter:
@@ -8,6 +10,7 @@ vr_he_sbs:
     fov: 100
     phi: -45
 
+# 2D Videos
 flat:
   video_filter: 'scale=${width}:${height}'
   parameter:
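
The video_filter entries are templates: the ${...} placeholders are filled from each projection's parameter block before the string is handed to ffmpeg (v360 reprojects the 180° side-by-side input to a flat view, scale just resizes 2D material). How the project performs the substitution is not shown in this commit; a sketch assuming a string.Template style replacement, with the width/height values picked only for illustration:

from string import Template

video_filter = 'v360=input=he:in_stereo=sbs:pitch=${phi}:output=flat:d_fov=${fov}:w=${width}:h=${height}'
parameter = {'width': 1024, 'height': 1024, 'fov': 100, 'phi': -45}  # width/height are assumed values

# Fill the placeholders to obtain the ffmpeg filter string for the preview.
filter_str = Template(video_filter).substitute(parameter)
print(filter_str)
# v360=input=he:in_stereo=sbs:pitch=-45:output=flat:d_fov=100:w=1024:h=1024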

funscript_editor/config/settings.yaml

Lines changed: 4 additions & 5 deletions
@@ -13,9 +13,8 @@ tracking_direction: 'y'
 # Limit the max player speed in the tracking preview window (0 = disable limit)
 max_playback_fps: 0
 
-# TODO
-preview_scaling: 1.0
+# Set the preview image scaling factor 1.0 should match your monitor size
+preview_scaling: 0.66
 
-# TODO
-# projection: 'vr_he_sbs'
-projection: 'flat'
+# Set the video type
+projection: 'vr_he_sbs'
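
preview_scaling is now an explicit display factor on top of the monitor fit that determine_monitor_scaling multiplies in place, so 1.0 fills the largest landscape monitor and the default 0.66 leaves some margin. A short worked example with assumed sizes (not taken from the commit):

# Assumed: largest landscape monitor 1920x1080, reprojected frame 1024x1024.
monitor_scale = min(1920 / 1024.0, 1080 / 1024.0)   # ~1.05, the frame would just fit
effective_scaling = 0.66 * monitor_scale            # preview_scaling from settings.yaml
# cv2.resize(preview, None, fx=effective_scaling, fy=effective_scaling)
# shows the preview at roughly 70% of the monitor-fitting size.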
