@@ -37,6 +37,7 @@ class FunscriptGeneratorParameter:
3737 track_men : bool
3838 metric : str
3939 projection : str
40+ supervised_tracking : bool = True
4041
4142 # Settings
4243 start_frame : int = 0 # default is video start (input: set current video position)
@@ -131,22 +132,23 @@ def determine_preview_scaling(self, frame_width, frame_height) -> None:
131132 self .params .preview_scaling = float (SETTINGS ['preview_scaling' ]) * max (scale )
132133
133134
134- def drawBox (self , img : np .ndarray , bbox : tuple ) -> np .ndarray :
135+ def draw_box (self , img : np .ndarray , bbox : tuple , color : tuple = ( 255 , 0 , 255 ) ) -> np .ndarray :
135136 """ Draw a tracking box on the image/frame
136137
137138 Args:
138139 img (np.ndarray): opencv image
139140 bbox (tuple): tracking box with (x,y,w,h)
141+ color (tuple): BGR color values for the box (OpenCV channel order)
140142
141143 Returns:
142144 np.ndarray: opencv image with annotated tracking box
143145 """
144146 annotated_img = img .copy ()
145- cv2 .rectangle (annotated_img , (bbox [0 ], bbox [1 ]), ((bbox [0 ]+ bbox [2 ]), (bbox [1 ]+ bbox [3 ])), ( 255 , 0 , 255 ) , 3 , 1 )
147+ cv2 .rectangle (annotated_img , (bbox [0 ], bbox [1 ]), ((bbox [0 ]+ bbox [2 ]), (bbox [1 ]+ bbox [3 ])), color , 3 , 1 )
146148 return annotated_img
147149
148150
149- def drawFPS (self , img : np .ndarray ) -> np .ndarray :
151+ def draw_fps (self , img : np .ndarray ) -> np .ndarray :
150152 """ Draw processing FPS on the image/frame
151153
152154 Args:
@@ -164,7 +166,7 @@ def drawFPS(self, img: np.ndarray) -> np.ndarray:
164166 return annotated_img
165167
166168
167- def drawTime (self , img : np .ndarray , frame_num : int ) -> np .ndarray :
169+ def draw_time (self , img : np .ndarray , frame_num : int ) -> np .ndarray :
168170 """ Draw Time on the image/frame
169171
170172 Args:
@@ -191,7 +193,7 @@ def drawTime(self, img: np.ndarray, frame_num: int) -> np.ndarray:
191193 return annotated_img
192194
193195
194- def drawText (self , img : np .ndarray , txt : str , y :int = 50 , color :tuple = (0 ,0 ,255 )) -> np .ndarray :
196+ def draw_text (self , img : np .ndarray , txt : str , y :int = 50 , color :tuple = (0 ,0 ,255 )) -> np .ndarray :
195197 """ Draw text to an image/frame
196198
197199 Args:
@@ -525,9 +527,9 @@ def get_vr_projection_config(self, image :np.ndarray) -> dict:
525527 parameter_changed = False
526528 preview = FFmpegStream .get_projection (image , config )
527529
528- preview = self .drawText (preview , "Press 'q' to use current selected region of interest)" ,
530+ preview = self .draw_text (preview , "Press 'q' to use current selected region of interest)" ,
529531 y = 50 , color = (255 , 0 , 0 ))
530- preview = self .drawText (preview , "VR Projection: Use 'w', 's' to move up/down to the region of interest" ,
532+ preview = self .draw_text (preview , "VR Projection: Use 'w', 's' to move up/down to the region of interest" ,
531533 y = 75 , color = (0 , 255 , 0 ))
532534
533535 cv2 .imshow (self .window_name , self .preview_scaling (preview ))
@@ -559,7 +561,7 @@ def __show_loading_screen(self, shape: tuple, txt: str = "Please wait...") -> No
559561 """
560562 try :
561563 background = np .full (shape , 0 , dtype = np .uint8 )
562- loading_screen = self .drawText (background , txt )
564+ loading_screen = self .draw_text (background , txt )
563565 cv2 .imshow (self .window_name , self .preview_scaling (loading_screen ))
564566 cv2 .waitKey (1 )
565567 except : pass
@@ -575,12 +577,12 @@ def get_bbox(self, image: np.ndarray, txt: str) -> tuple:
575577 Returns:
576578 tuple: the entered box tuple (x,y,w,h)
577579 """
578- image = self .drawText (image , "Select area with Mouse and Press 'space' or 'enter' to continue" ,
580+ image = self .draw_text (image , "Select area with Mouse and Press 'space' or 'enter' to continue" ,
579581 y = 75 , color = (255 , 0 , 0 ))
580582
581583 if self .params .use_zoom :
582584 while True :
583- zoom_bbox = cv2 .selectROI (self .window_name , self .drawText (image , "Zoom selected area" ), False )
585+ zoom_bbox = cv2 .selectROI (self .window_name , self .draw_text (image , "Zoom selected area" ), False )
584586 if zoom_bbox is None or len (zoom_bbox ) == 0 : continue
585587 if zoom_bbox [2 ] < 75 or zoom_bbox [3 ] < 75 :
586588 self .logger .error ("The selected zoom area is to small" )
@@ -590,7 +592,7 @@ def get_bbox(self, image: np.ndarray, txt: str) -> tuple:
590592 image = image [zoom_bbox [1 ]:zoom_bbox [1 ]+ zoom_bbox [3 ], zoom_bbox [0 ]:zoom_bbox [0 ]+ zoom_bbox [2 ]]
591593 image = cv2 .resize (image , None , fx = self .params .zoom_factor , fy = self .params .zoom_factor )
592594
593- image = self .drawText (image , txt )
595+ image = self .draw_text (image , txt )
594596 image = self .preview_scaling (image )
595597 while True :
596598 bbox = cv2 .selectROI (self .window_name , image , False )
@@ -666,14 +668,25 @@ def tracking(self) -> str:
666668 }
667669
668670 first_frame = video .read ()
669- bboxWoman = self .get_bbox (first_frame , "Select Woman Feature" )
670- trackerWoman = StaticVideoTracker (first_frame , bboxWoman )
671- bboxes ['Woman' ][1 ] = bboxWoman
671+ bbox_woman = self .get_bbox (first_frame , "Select Woman Feature" )
672+ preview_frame = self .draw_box (first_frame , bbox_woman , color = (255 ,0 ,255 ))
673+ if self .params .supervised_tracking :
674+ tracking_area_woman = self .get_bbox (preview_frame , "Select the Supervised Tracking Area for the Woman Feature" )
675+ preview_frame = self .draw_box (preview_frame , tracking_area_woman , color = (0 ,255 ,0 ))
676+ tracker_woman = StaticVideoTracker (first_frame , bbox_woman , supervised_tracking_area = tracking_area_woman )
677+ else :
678+ tracker_woman = StaticVideoTracker (first_frame , bbox_woman )
679+ bboxes ['Woman' ][1 ] = bbox_woman
672680
673681 if self .params .track_men :
674- bboxMen = self .get_bbox (self .drawBox (first_frame , bboxWoman ), "Select Men Feature" )
675- trackerMen = StaticVideoTracker (first_frame , bboxMen )
676- bboxes ['Men' ][1 ] = bboxMen
682+ bbox_men = self .get_bbox (preview_frame , "Select Men Feature" )
683+ preview_frame = self .draw_box (preview_frame , bbox_men , color = (255 ,0 ,255 ))
684+ if self .params .supervised_tracking :
685+ tracking_area_men = self .get_bbox (preview_frame , "Select the Supervised Tracking Area for the Men Feature" )
686+ tracker_men = StaticVideoTracker (first_frame , bbox_men , supervised_tracking_area = tracking_area_men )
687+ else :
688+ tracker_men = StaticVideoTracker (first_frame , bbox_men )
689+ bboxes ['Men' ][1 ] = bbox_men
677690
678691 if self .params .max_playback_fps > (self .params .skip_frames + 1 ):
679692 cycle_time_in_ms = (float (1000 ) / float (self .params .max_playback_fps )) * (self .params .skip_frames + 1 )
@@ -700,40 +713,44 @@ def tracking(self) -> str:
700713 status = "Tracking stop at existing action point"
701714 break
702715
703- trackerWoman .update (frame )
704- if self .params .track_men : trackerMen .update (frame )
716+ tracker_woman .update (frame )
717+ if self .params .track_men : tracker_men .update (frame )
705718
706719 if last_frame is not None :
707720 # Process data from last step while the next tracking points get predicted.
708721 # This should improve the whole processing speed, because the tracker run in a seperate thread
709- bboxes ['Woman' ][frame_num - 1 ] = bboxWoman
710- last_frame = self .drawBox (last_frame , bboxes ['Woman' ][frame_num - 1 ])
722+ bboxes ['Woman' ][frame_num - 1 ] = bbox_woman
723+ last_frame = self .draw_box (last_frame , bboxes ['Woman' ][frame_num - 1 ])
724+ if self .params .supervised_tracking :
725+ last_frame = self .draw_box (last_frame , tracking_area_woman , color = (0 ,255 ,0 ))
711726
712727 if self .params .track_men :
713- bboxes ['Men' ][frame_num - 1 ] = bboxMen
714- last_frame = self .drawBox (last_frame , bboxes ['Men' ][frame_num - 1 ])
728+ bboxes ['Men' ][frame_num - 1 ] = bbox_men
729+ last_frame = self .draw_box (last_frame , bboxes ['Men' ][frame_num - 1 ])
730+ if self .params .supervised_tracking :
731+ last_frame = self .draw_box (last_frame , tracking_area_men , color = (0 ,255 ,0 ))
715732
716- last_frame = self .drawFPS (last_frame )
733+ last_frame = self .draw_fps (last_frame )
717734 cv2 .putText (last_frame , "Press 'q' if the tracking point shifts or a video cut occured" ,
718735 (self .x_text_start , 75 ), cv2 .FONT_HERSHEY_SIMPLEX , self .font_size , (255 ,0 ,0 ), 2 )
719- last_frame = self .drawTime (last_frame , frame_num + self .params .start_frame )
736+ last_frame = self .draw_time (last_frame , frame_num + self .params .start_frame )
720737 cv2 .imshow (self .window_name , self .preview_scaling (last_frame ))
721738
722739 if self .was_key_pressed ('q' ) or cv2 .waitKey (1 ) == ord ('q' ):
723740 status = 'Tracking stopped by user'
724741 bboxes = self .delete_last_tracking_predictions (bboxes , int ((self .get_average_tracking_fps ()+ 1 )* 2.0 ))
725742 break
726743
727- (successWoman , bboxWoman ) = trackerWoman .result ()
728- if not successWoman :
729- status = 'Tracker Woman Lost'
744+ (woman_tracker_status , bbox_woman ) = tracker_woman .result ()
745+ if woman_tracker_status != "OK" :
746+ status = 'Woman ' + woman_tracker_status
730747 bboxes = self .delete_last_tracking_predictions (bboxes , (self .params .skip_frames + 1 )* 3 )
731748 break
732749
733750 if self .params .track_men :
734- (successMen , bboxMen ) = trackerMen .result ()
735- if not successMen :
736- status = 'Tracking Men Lost'
751+ (men_tracker_status , bbox_men ) = tracker_men .result ()
752+ if men_tracker_status != "OK" :
753+ status = 'Men ' + men_tracker_status
737754 bboxes = self .delete_last_tracking_predictions (bboxes , (self .params .skip_frames + 1 )* 3 )
738755 break
739756
0 commit comments