diff --git a/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.cpp b/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.cpp
index 60b9a63dec..13cf30ba93 100644
--- a/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.cpp
+++ b/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.cpp
@@ -24,8 +24,8 @@ namespace PokemonAutomation{
 namespace ML{
 
 
-SAMEmbedderSession::SAMEmbedderSession(const std::string& model_path)
-    : m_session_options{create_session_options(ML_MODEL_CACHE_PATH() + "SAMEmbedder/")}
+SAMEmbedderSession::SAMEmbedderSession(const std::string& model_path, bool use_gpu)
+    : m_session_options{create_session_options(ML_MODEL_CACHE_PATH() + "SAMEmbedder/", use_gpu)}
     , session{create_session(m_env, m_session_options, model_path, ML_MODEL_CACHE_PATH() + "SAMEmbedder/")}
     , memory_info{Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)}
     , input_names{session.GetInputNames()}
@@ -64,8 +64,8 @@ void SAMEmbedderSession::run(cv::Mat& input_image, std::vector<float>& model_out
 }
 
 
-SAMSession::SAMSession(const std::string& model_path)
-    : m_session_options{create_session_options(ML_MODEL_CACHE_PATH() + "SAM/")}
+SAMSession::SAMSession(const std::string& model_path, bool use_gpu)
+    : m_session_options{create_session_options(ML_MODEL_CACHE_PATH() + "SAM/", use_gpu)}
     , session{create_session(m_env, m_session_options, model_path, ML_MODEL_CACHE_PATH() + "SAM/")}
     , memory_info{Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)}
     , input_names{session.GetInputNames()}
@@ -176,7 +176,7 @@ void SAMSession::run(
 }
 
 
-void compute_embeddings_for_folder(const std::string& embedding_model_path, const std::string& image_folder_path){
+void compute_embeddings_for_folder(const std::string& embedding_model_path, const std::string& image_folder_path, bool use_gpu_for_embedder_session){
     const bool recursive_search = true;
     std::vector<std::string> all_image_paths = find_images_in_folder(image_folder_path, recursive_search);
     if (all_image_paths.size() == 0){
@@ -200,7 +200,8 @@ void compute_embeddings_for_folder(const std::string& embedding_model_path, cons
         return;
     }
 
-    SAMEmbedderSession embedding_session(embedding_model_path);
+    bool use_gpu = use_gpu_for_embedder_session;
+    std::unique_ptr<SAMEmbedderSession> embedding_session = std::make_unique<SAMEmbedderSession>(embedding_model_path, use_gpu);
     std::vector<float> output_image_embedding;
     for (size_t i = 0; i < all_image_paths.size(); i++){
         const auto& image_path = all_image_paths[i];
@@ -236,7 +237,43 @@ void compute_embeddings_for_folder(const std::string& embedding_model_path, cons
         cv::resize(image, resized_mat, cv::Size(SAM_EMBEDDER_INPUT_IMAGE_WIDTH, SAM_EMBEDDER_INPUT_IMAGE_HEIGHT));
 
         output_image_embedding.clear();
-        embedding_session.run(resized_mat, output_image_embedding);
+
+        // fall back to CPU if fails with GPU.
+        for(size_t j = 0; j < 2; j++){
+            try{
+                // If fails with GPU, fall back to CPU.
+                // throw Ort::Exception("Testing.", ORT_FAIL); // to simulate GPU/CPU failure
+                embedding_session->run(resized_mat, output_image_embedding);
+                break;
+            }catch(Ort::Exception& e){
+                if (use_gpu){
+                    std::cerr << "Warning: Embedding session failed using the GPU. Will reattempt with the CPU.\n" << e.what() << std::endl;
+                    use_gpu = false;
+                    embedding_session = std::make_unique<SAMEmbedderSession>(embedding_model_path, use_gpu);
+                }else{
+                    std::cerr << "Error: Embedding session failed even when using the CPU.\n" << e.what() << std::endl;
+                    QMessageBox box;
+                    box.warning(nullptr, "Error:",
+                        QString::fromStdString("Error: Embedding session failed."));
+                    return;
+                }
+            }catch(...){
+                std::cerr << "Error: Unknown error. Embedding session failed." << std::endl;
+                QMessageBox box;
+                box.warning(nullptr, "Error:",
+                    QString::fromStdString("Error: Unknown error. Embedding session failed."));
+                return;
+
+            }
+
+            if (j > 0){
+                std::cerr << "Internal Program Error: This section of code shouldn't be reachable." << std::endl;
+                QMessageBox box;
+                box.warning(nullptr, "Error:",
+                    QString::fromStdString("Internal Program Error: This section of code shouldn't be reachable."));
+                return;
+            }
+        }
         save_image_embedding_to_disk(image_path, output_image_embedding);
     }
     std::cout << "Done computing embeddings for images in folder " << image_folder_path << "." << std::endl;
diff --git a/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.h b/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.h
index e4f285740a..4dae45eaff 100644
--- a/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.h
+++ b/SerialPrograms/Source/ML/DataLabeling/ML_SegmentAnythingModel.h
@@ -23,12 +23,12 @@ namespace ML{
 
 // Compute embeddings for all images in a folder. Only support .png, .jpg and .jpeg filename extensions so far.
 // This can be very slow!
-void compute_embeddings_for_folder(const std::string& embedding_model_path, const std::string& image_folder_path);
+void compute_embeddings_for_folder(const std::string& embedding_model_path, const std::string& image_folder_path, bool use_gpu_for_embedder_session);
 
 
 class SAMEmbedderSession{
 public:
-    SAMEmbedderSession(const std::string& model_path);
+    SAMEmbedderSession(const std::string& model_path, bool use_gpu);
 
     // Given an image of shape SAM_EMBEDDER_INPUT_IMAGE_WIDTH x SAM_EMBEDDER_INPUT_IMAGE_HEIGHT, RGB channel order,
     // compute its image embedding as a vector of size [SAM_EMBEDDER_OUTPUT_SIZE]
@@ -52,7 +52,7 @@ class SAMEmbedderSession{
 // Run Segment Anything Model in an ONNX session.
 class SAMSession{
 public:
-    SAMSession(const std::string& model_path);
+    SAMSession(const std::string& model_path, bool use_gpu);
 
     // embedding: input embedding
     // input_points: input point coordinates (x, y) in pixel units. [p0_x, p0_y, p1_x, p1_y, p2_x, ...].
diff --git a/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.cpp b/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.cpp
index 8a74038c32..9f7ee55db4 100644
--- a/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.cpp
+++ b/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.cpp
@@ -31,6 +31,8 @@ YOLOv5Detector::~YOLOv5Detector() = default;
 
 
 YOLOv5Detector::YOLOv5Detector(const std::string& model_path)
+    : m_model_path(model_path)
+    , m_use_gpu(true)
 {
     if (!model_path.ends_with(".onnx")){
         throw InternalProgramError(nullptr, PA_CURRENT_FUNCTION,
@@ -76,7 +78,7 @@ YOLOv5Detector::YOLOv5Detector(const std::string& model_path)
     }
     label_file.close();
 
-    m_yolo_session = std::make_unique<YOLOv5Session>(model_path, std::move(labels));
+    m_yolo_session = std::make_unique<YOLOv5Session>(m_model_path, std::move(labels), m_use_gpu);
 }
 
 bool YOLOv5Detector::detect(const ImageViewRGB32& screen){
@@ -89,7 +91,33 @@ bool YOLOv5Detector::detect(const ImageViewRGB32& screen){
     cv::cvtColor(frame_mat_bgra, frame_mat_rgb, cv::COLOR_BGRA2RGB);
 
     m_output_boxes.clear();
-    m_yolo_session->run(frame_mat_rgb, m_output_boxes);
+
+    // fall back to CPU if fails with GPU.
+    for(size_t i = 0; i < 2; i++){
+        try{
+            // if (m_use_gpu){ throw Ort::Exception("Testing.", ORT_FAIL); } // to simulate GPU/CPU failure
+            // If fails with GPU, fall back to CPU.
+            m_yolo_session->run(frame_mat_rgb, m_output_boxes);
+            break;
+        }catch(Ort::Exception& e){
+            if (m_use_gpu){
+                std::cerr << "Warning: YOLO session failed using the GPU. Will reattempt with the CPU.\n" << e.what() << std::endl;
+                m_use_gpu = false;
+                std::vector<std::string> labels = m_yolo_session->get_label_names();
+                m_yolo_session = std::make_unique<YOLOv5Session>(m_model_path, std::move(labels), m_use_gpu);
+            }else{
+                throw InternalProgramError(nullptr, PA_CURRENT_FUNCTION, "Error: YOLO session failed even when using the CPU." + std::string(e.what()));
+            }
+        }catch(...){
+            throw InternalProgramError(nullptr, PA_CURRENT_FUNCTION, "Unknown error: YOLO session failed.");
+
+        }
+
+        if (i > 0){
+            throw InternalProgramError(nullptr, PA_CURRENT_FUNCTION, "Internal Program Error: This section of code shouldn't be reachable.");
+        }
+    }
+
     return m_output_boxes.size() > 0;
 }
diff --git a/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.h b/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.h
index 2be057b579..51537633c1 100644
--- a/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.h
+++ b/SerialPrograms/Source/ML/Inference/ML_YOLOv5Detector.h
@@ -41,6 +41,9 @@ class YOLOv5Detector : public StaticScreenDetector{
     const std::unique_ptr<YOLOv5Session>& session() const { return m_yolo_session; }
 
 protected:
+    std::string m_model_path;
+    bool m_use_gpu;
+    // std::vector<std::string> m_labels;
     std::unique_ptr<YOLOv5Session> m_yolo_session;
     std::vector m_output_boxes;
 };
diff --git a/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.cpp b/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.cpp
index 8d09788905..32ab505e6b 100644
--- a/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.cpp
+++ b/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.cpp
@@ -41,9 +41,11 @@ std::string create_file_hash(const std::string& filepath){
 }
 
 
-Ort::SessionOptions create_session_options(const std::string& model_cache_path){
+Ort::SessionOptions create_session_options(const std::string& model_cache_path, bool use_gpu){
     Ort::SessionOptions so;
     std::cout << "Set potential model cache path in session options: " << model_cache_path << std::endl;
+
+if (use_gpu){
 #if __APPLE__
     // create session using Apple ML acceleration library CoreML
     std::unordered_map<std::string, std::string> provider_options;
@@ -58,8 +60,6 @@ Ort::SessionOptions create_session_options(const std::string& model_cache_path){
     so.AppendExecutionProvider("CoreML", provider_options);
     std::cout << "Using CoreML execution provider for GPU acceleration" << std::endl;
 #elif _WIN32
-#define ENABLE_WIN_GPU
-#ifdef ENABLE_WIN_GPU
     // Try CUDA first for NVIDIA GPUs (best performance)
     // CUDA requires NVIDIA GPU and CUDA runtime installation
     // See: https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html
@@ -85,8 +85,8 @@ Ort::SessionOptions create_session_options(const std::string& model_cache_path){
             std::cout << "DirectML execution provider not available, falling back to CPU: " << e.what() << std::endl;
         }
     }
-#endif // ENABLE_WIN_GPU
 #endif
+}
 
     // CPU fallback is always available
     return so;
diff --git a/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.h b/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.h
index b2dac93d0f..22181257d6 100644
--- a/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.h
+++ b/SerialPrograms/Source/ML/Models/ML_ONNXRuntimeHelpers.h
@@ -23,7 +23,7 @@ namespace ML{
 //
 // model_cache_path: the path to store model caches. This path is better
 // to be unique for each model for easier file management.
-Ort::SessionOptions create_session_options(const std::string& model_cache_path);
+Ort::SessionOptions create_session_options(const std::string& model_cache_path, bool use_gpu);
 
 
 // Create an ONNX Session. It will also update the model cache on macOS if necessary.
diff --git a/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.cpp b/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.cpp
index 5cbaf11fd6..3929704426 100644
--- a/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.cpp
+++ b/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.cpp
@@ -58,9 +58,9 @@ std::tuple resize_image_with_border(
 }
 
 
-YOLOv5Session::YOLOv5Session(const std::string& model_path, std::vector<std::string> label_names)
+YOLOv5Session::YOLOv5Session(const std::string& model_path, std::vector<std::string> label_names, bool use_gpu)
 : m_label_names(std::move(label_names))
-, m_session_options(create_session_options(ML_MODEL_CACHE_PATH() + "YOLOv5"))
+, m_session_options(create_session_options(ML_MODEL_CACHE_PATH() + "YOLOv5", use_gpu))
 , m_session{create_session(m_env, m_session_options, model_path, ML_MODEL_CACHE_PATH() + "YOLOv5")}
 , m_memory_info{Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU)}
 , m_input_names{m_session.GetInputNames()}
diff --git a/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.h b/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.h
index d8a247cb2f..72cac8f538 100644
--- a/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.h
+++ b/SerialPrograms/Source/ML/Models/ML_YOLOv5Model.h
@@ -28,11 +28,13 @@ class YOLOv5Session{
         size_t label_idx;
     };
 
-    YOLOv5Session(const std::string& model_path, std::vector<std::string> label_names);
+    YOLOv5Session(const std::string& model_path, std::vector<std::string> label_names, bool use_gpu);
 
     void run(const cv::Mat& input_image, std::vector& detections);
 
     const std::string& label_name(size_t idx) const { return m_label_names[idx]; }
+
+    std::vector<std::string> get_label_names() const { return m_label_names; }
 
 private:
     const int YOLO5_INPUT_IMAGE_SIZE = 640;
diff --git a/SerialPrograms/Source/ML/Programs/ML_LabelImages.cpp b/SerialPrograms/Source/ML/Programs/ML_LabelImages.cpp
index bf0c80b7ac..36aeeb1746 100644
--- a/SerialPrograms/Source/ML/Programs/ML_LabelImages.cpp
+++ b/SerialPrograms/Source/ML/Programs/ML_LabelImages.cpp
@@ -55,6 +55,7 @@ LabelImages::LabelImages(const LabelImages_Descriptor& descriptor)
     : PanelInstance(descriptor)
     , m_display_session(m_display_option)
     , m_options(LockMode::UNLOCK_WHILE_RUNNING)
+    , m_use_gpu_for_sam_anno(true)
     , X("X Coordinate:", LockMode::UNLOCK_WHILE_RUNNING, 0.3, 0.0, 1.0)
     , Y("Y Coordinate:", LockMode::UNLOCK_WHILE_RUNNING, 0.3, 0.0, 1.0)
     , WIDTH("Width:", LockMode::UNLOCK_WHILE_RUNNING, 0.4, 0.0, 1.0)
@@ -65,11 +66,25 @@ LabelImages::LabelImages(const LabelImages_Descriptor& descriptor)
     , CUSTOM_LABEL_DATABASE(create_string_select_database({"mc"})) // mc for "main character"
     , CUSTOM_SET_LABEL(CUSTOM_LABEL_DATABASE, LockMode::UNLOCK_WHILE_RUNNING, 0)
     , MANUAL_LABEL(false, LockMode::UNLOCK_WHILE_RUNNING, "", "Custom Label", true)
+    , USE_GPU_FOR_EMBEDDER_SESSION("Enable GPU for Embedder session:", LockMode::LOCK_WHILE_RUNNING, true)
+    , SELECTED_ANNO_COLOR(
+        "Color of selected annotation:",
+        {
+            {ColorChoice::BLACK, "black", "Black"},
+            {ColorChoice::GREEN, "green", "Green"},
+            {ColorChoice::ORANGE, "orange", "Orange"},
+            {ColorChoice::MAGENTA, "magenta", "Magenta"},
+        },
+        LockMode::LOCK_WHILE_RUNNING,
+        ColorChoice::BLACK
+    )
 {
     ADD_OPTION(LABEL_TYPE);
     ADD_OPTION(FORM_LABEL);
     ADD_OPTION(CUSTOM_SET_LABEL);
     ADD_OPTION(MANUAL_LABEL);
+    ADD_OPTION(USE_GPU_FOR_EMBEDDER_SESSION);
+    ADD_OPTION(SELECTED_ANNO_COLOR);
 
     X.add_listener(*this);
     Y.add_listener(*this);
@@ -82,16 +97,7 @@
 
 
 
-    // , m_sam_session{RESOURCE_PATH() + "ML/sam_cpu.onnx"}
-    const std::string sam_model_path = RESOURCE_PATH() + "ML/sam_cpu.onnx";
-    if (std::filesystem::exists(sam_model_path)){
-        m_sam_session = std::make_unique<SAMSession>(sam_model_path);
-    } else{
-        std::cerr << "Error: no such SAM model path " << sam_model_path << "." << std::endl;
-        QMessageBox box;
-        box.critical(nullptr, "SAM Model Does Not Exist",
-            QString::fromStdString("SAM model path" + sam_model_path + " does not exist."));
-    }
+    init_sam_session(true);
 
     m_overlay_manager = new LabelImages_OverlayManager(*this);
 }
@@ -134,6 +140,21 @@ JsonValue LabelImages::to_json() const{
     return obj;
 }
 
+void LabelImages::init_sam_session(bool use_gpu){
+    // , m_sam_session{RESOURCE_PATH() + "ML/sam_cpu.onnx"}
+
+    const std::string sam_model_path = RESOURCE_PATH() + "ML/sam_cpu.onnx";
+    if (std::filesystem::exists(sam_model_path)){
+        m_sam_session = std::make_unique<SAMSession>(sam_model_path, use_gpu);
+    } else{
+        std::cerr << "Error: no such SAM model path " << sam_model_path << "." << std::endl;
+        QMessageBox box;
+        box.critical(nullptr, "SAM Model Does Not Exist",
+            QString::fromStdString("SAM model path" + sam_model_path + " does not exist."));
+    }
+
+}
+
 void LabelImages::save_annotation_to_file() const{
     if (m_annotation_file_path.size() == 0 || m_fail_to_load_annotation_file){
         return;
@@ -306,13 +327,47 @@ bool LabelImages::run_sam_to_create_annotation(
         input_point_labels[inclusion_points.size() + i] = 0;
     }
 
-    m_sam_session->run(
-        m_image_embedding,
-        (int)source_height, (int)source_width, input_points, input_point_labels,
-        {static_cast<int>(user_box.min_x), static_cast<int>(user_box.min_y), static_cast<int>(user_box.max_x)-1, static_cast<int>(user_box.max_y)-1},
-        m_output_boolean_mask
-    );
+    // fall back to CPU if fails with GPU.
+    for(size_t i = 0; i < 2; i++){
+        try{
+            // if (m_use_gpu_for_sam_anno){ throw Ort::Exception("Testing.", ORT_FAIL); } // to simulate GPU/CPU failure
+            m_sam_session->run(
+                m_image_embedding,
+                (int)source_height, (int)source_width, input_points, input_point_labels,
+                {static_cast<int>(user_box.min_x), static_cast<int>(user_box.min_y), static_cast<int>(user_box.max_x)-1, static_cast<int>(user_box.max_y)-1},
+                m_output_boolean_mask
+            );
+            break;
+        }catch(Ort::Exception& e){
+            if (m_use_gpu_for_sam_anno){
+                std::cerr << "Warning: SAM session failed using the GPU. Will reattempt with the CPU.\n" << e.what() << std::endl;
+                m_use_gpu_for_sam_anno = false;
+                init_sam_session(m_use_gpu_for_sam_anno);
+            }else{
+                std::cerr << "Error: SAM session failed even when using the CPU.\n" << e.what() << std::endl;
+                QMessageBox box;
+                box.warning(nullptr, "Error:",
+                    QString::fromStdString("Error: SAM session failed."));
+                return false;
+            }
+        }catch(...){
+            std::cerr << "Error: Unknown error." << std::endl;
+            QMessageBox box;
+            box.warning(nullptr, "Error:",
+                QString::fromStdString("Error: Unknown error. SAM session failed."));
+            return false;
+        }
+
+        if (i > 0){
+            std::cerr << "Internal Program Error: This section of code shouldn't be reachable." << std::endl;
+            QMessageBox box;
+            box.warning(nullptr, "Error:",
+                QString::fromStdString("Internal Program Error: This section of code shouldn't be reachable."));
+            return false;
+        }
+
+    }
     size_t min_mask_x = INT_MAX, max_mask_x = 0;
     size_t min_mask_y = INT_MAX, max_mask_y = 0;
     for (size_t y = 0; y < source_height; y++){
@@ -418,7 +473,7 @@ void LabelImages::remove_segmentation_exclusion_point(double x, double y){
 void LabelImages::compute_embeddings_for_folder(const std::string& image_folder_path){
     std::string embedding_model_path = RESOURCE_PATH() + "ML/sam_embedder_cpu.onnx";
     std::cout << "Use SAM Embedding model " << embedding_model_path << std::endl;
-    ML::compute_embeddings_for_folder(embedding_model_path, image_folder_path);
+    ML::compute_embeddings_for_folder(embedding_model_path, image_folder_path, USE_GPU_FOR_EMBEDDER_SESSION);
 }
 
 void LabelImages::delete_selected_annotation(){
@@ -656,6 +711,24 @@ void LabelImages::export_to_yolov5_dataset(const std::string& image_folder_path,
     export_image_annotations_to_yolo_dataset(image_folder_path, image_folder_path, dataset_path);
     // image_folder_path, ML_ANNOTATION_PATH(), dataset_path);
 }
 
+Color enum_to_color(ColorChoice color_choice){
+    switch(color_choice){
+    case ColorChoice::BLACK:
+        return COLOR_BLACK;
+    case ColorChoice::GREEN:
+        return COLOR_GREEN;
+    case ColorChoice::ORANGE:
+        return COLOR_ORANGE;
+    case ColorChoice::MAGENTA:
+        return COLOR_MAGENTA;
+    default:
+        QMessageBox box;
+        box.warning(nullptr, "Error:",
+            QString::fromStdString("Error: Unknown color selected."));
+        return COLOR_BLACK;
+
+    }
+}
 }
 }
diff --git a/SerialPrograms/Source/ML/Programs/ML_LabelImages.h b/SerialPrograms/Source/ML/Programs/ML_LabelImages.h
index 41318c4c8c..7407ac964f 100644
--- a/SerialPrograms/Source/ML/Programs/ML_LabelImages.h
+++ b/SerialPrograms/Source/ML/Programs/ML_LabelImages.h
@@ -13,6 +13,8 @@
 #include "Common/Cpp/Options/EnumDropdownOption.h"
 #include "Common/Cpp/Options/EnumDropdownDatabase.h"
 #include "Common/Cpp/Options/StringOption.h"
+#include "Common/Cpp/Options/BooleanCheckBoxOption.h"
+#include "Common/Cpp/Color.h"
 #include "CommonFramework/Panels/PanelInstance.h"
 #include "CommonFramework/ImageTypes/ImageRGB32.h"
 #include "Pokemon/Options/Pokemon_HomeSpriteSelectOption.h"
@@ -39,6 +41,15 @@ class LabelImages_Widget;
 class LabelImages_OverlayManager;
 
 
+enum class ColorChoice{
+    BLACK,
+    GREEN,
+    ORANGE,
+    MAGENTA,
+};
+
+Color enum_to_color(ColorChoice color_choice);
+
 class LabelImages_Descriptor : public PanelDescriptor{
 public:
     LabelImages_Descriptor();
@@ -57,6 +68,8 @@ class LabelImages : public PanelInstance, public ConfigOption::Listener {
     virtual void from_json(const JsonValue& json) override;
     virtual JsonValue to_json() const override;
 
+    void init_sam_session(bool use_gpu);
+
     void save_annotation_to_file() const;
 
     // called after loading a new image, clean up all internal data
@@ -140,6 +153,8 @@ class LabelImages : public PanelInstance, public ConfigOption::Listener {
     // the group option that holds rest of the options defined below:
     BatchOption m_options;
 
+    bool m_use_gpu_for_sam_anno;
+
     FloatingPointOption X;
     FloatingPointOption Y;
     FloatingPointOption WIDTH;
@@ -158,6 +173,11 @@ class LabelImages : public PanelInstance, public ConfigOption::Listener {
     // source 3: editable text input
     StringCell MANUAL_LABEL;
 
+    BooleanCheckBoxOption USE_GPU_FOR_EMBEDDER_SESSION;
+
+
+    EnumDropdownOption<ColorChoice> SELECTED_ANNO_COLOR;
+
     size_t source_image_height = 0;
     size_t source_image_width = 0;
     std::vector<float> m_image_embedding;
diff --git a/SerialPrograms/Source/ML/Programs/ML_LabelImagesOverlayManager.cpp b/SerialPrograms/Source/ML/Programs/ML_LabelImagesOverlayManager.cpp
index 1977f86e4d..258235e0c7 100644
--- a/SerialPrograms/Source/ML/Programs/ML_LabelImagesOverlayManager.cpp
+++ b/SerialPrograms/Source/ML/Programs/ML_LabelImagesOverlayManager.cpp
@@ -105,7 +105,7 @@ void LabelImages_OverlayManager::update_rendered_annotations(){
         if (form != nullptr){
             label = form->display_name();
         }
-        Color mask_box_color = (i_obj == m_selected) ? COLOR_BLACK : COLOR_BLUE;
+        Color mask_box_color = (i_obj == m_selected) ? enum_to_color(m_program.SELECTED_ANNO_COLOR) : COLOR_BLUE;
         m_overlay_set.add(mask_box_color, mask_float_box, label);
         size_t mask_width = obj.mask_box.width();
         size_t mask_height = obj.mask_box.height();
diff --git a/SerialPrograms/Source/ML/Programs/ML_LabelImagesWidget.cpp b/SerialPrograms/Source/ML/Programs/ML_LabelImagesWidget.cpp
index b567289711..fe91ec2747 100644
--- a/SerialPrograms/Source/ML/Programs/ML_LabelImagesWidget.cpp
+++ b/SerialPrograms/Source/ML/Programs/ML_LabelImagesWidget.cpp
@@ -125,6 +125,18 @@ LabelImages_Widget::LabelImages_Widget(
     annotation_row->addWidget(load_custom_set_button, 2);
     annotation_row->addWidget(new QLabel(scroll_inner), 10); // an empty label to push other UIs to the left
 
+    // add GPU checkbox row
+    QHBoxLayout* use_gpu_row = new QHBoxLayout();
+    scroll_layout->addLayout(use_gpu_row);
+    ConfigWidget* gpu_checkbox_widget = program.USE_GPU_FOR_EMBEDDER_SESSION.make_QtWidget(*scroll_inner);
+    use_gpu_row->addWidget(&gpu_checkbox_widget->widget(), 2);
+
+    // add Color selection dropdown
+    QHBoxLayout* color_choice_row = new QHBoxLayout();
+    scroll_layout->addLayout(color_choice_row);
+    ConfigWidget* color_choice_widget = program.SELECTED_ANNO_COLOR.make_QtWidget(*scroll_inner);
+    color_choice_row->addWidget(&color_choice_widget->widget(), 2);
+
     // add compute embedding button
     QHBoxLayout* external_action_row = new QHBoxLayout();
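
Reviewer note (not part of the patch): the GPU-failure handling above is duplicated almost verbatim in compute_embeddings_for_folder, YOLOv5Detector::detect, and LabelImages::run_sam_to_create_annotation. A minimal sketch of how the two-iteration retry could be factored into one helper follows; the helper name run_with_cpu_fallback and its callback-based signature are invented here purely for illustration and are not APIs in this codebase.

// Hypothetical sketch, assuming only the ONNX Runtime C++ API used elsewhere in this patch.
#include <functional>
#include <iostream>
#include <string>
#include <onnxruntime_cxx_api.h>

// Runs `attempt` once. If it throws Ort::Exception while `use_gpu` is true, rebuilds the
// session on CPU via `rebuild_on_cpu` and retries exactly once. Returns true on success,
// false if even the CPU attempt threw.
inline bool run_with_cpu_fallback(
    bool& use_gpu,
    const std::string& session_name,
    const std::function<void()>& attempt,
    const std::function<void()>& rebuild_on_cpu
){
    for (size_t i = 0; i < 2; i++){
        try{
            attempt();
            return true;
        }catch (const Ort::Exception& e){
            if (!use_gpu){
                std::cerr << "Error: " << session_name << " failed even when using the CPU.\n"
                          << e.what() << std::endl;
                return false;
            }
            std::cerr << "Warning: " << session_name << " failed using the GPU. "
                         "Will reattempt with the CPU.\n" << e.what() << std::endl;
            use_gpu = false;
            rebuild_on_cpu();   // e.g. recreate the Ort session with use_gpu == false
        }
    }
    return false;   // defensive: both passes either returned or rethrew above
}

A caller such as YOLOv5Detector::detect could then pass a lambda wrapping m_yolo_session->run(...) as `attempt` and a lambda that re-creates the session as `rebuild_on_cpu`, instead of repeating the loop, the catch blocks, and the "shouldn't be reachable" guard at each call site; how the three call sites report failure (QMessageBox, return false, or InternalProgramError) would still stay with the caller.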