diff --git a/src/memos/api/config.py b/src/memos/api/config.py
index d2dd19266..65049b0c2 100644
--- a/src/memos/api/config.py
+++ b/src/memos/api/config.py
@@ -1014,6 +1014,7 @@ def create_user_config(user_name: str, user_id: str) -> tuple["MOSConfig", "Gene
                     "fast_graph": bool(os.getenv("FAST_GRAPH", "false") == "true"),
                     "bm25": bool(os.getenv("BM25_CALL", "false") == "true"),
                     "cot": bool(os.getenv("VEC_COT_CALL", "false") == "true"),
+                    "fulltext": bool(os.getenv("FULLTEXT_CALL", "false") == "true"),
                 },
                 "include_embedding": bool(
                     os.getenv("INCLUDE_EMBEDDING", "false") == "true"
@@ -1096,6 +1097,7 @@ def get_default_cube_config() -> "GeneralMemCubeConfig | None":
                 "fast_graph": bool(os.getenv("FAST_GRAPH", "false") == "true"),
                 "bm25": bool(os.getenv("BM25_CALL", "false") == "true"),
                 "cot": bool(os.getenv("VEC_COT_CALL", "false") == "true"),
+                "fulltext": bool(os.getenv("FULLTEXT_CALL", "false") == "true"),
             },
             "mode": os.getenv("ASYNC_MODE", "sync"),
             "include_embedding": bool(
diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
index f00efccb6..da6b1e32a 100644
--- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
+++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
@@ -67,6 +67,7 @@ def __init__(
         self.internet_retriever = internet_retriever
         self.vec_cot = search_strategy.get("cot", False) if search_strategy else False
         self.use_fast_graph = search_strategy.get("fast_graph", False) if search_strategy else False
+        self.use_fulltext = search_strategy.get("fulltext", False) if search_strategy else False
         self.manual_close_internet = manual_close_internet
         self.tokenizer = tokenizer
         self._usage_executor = ContextThreadPoolExecutor(max_workers=4, thread_name_prefix="usage")
@@ -380,20 +381,21 @@ def _retrieve_paths(
                 user_name,
             )
         )
-        tasks.append(
-            executor.submit(
-                self._retrieve_from_keyword,
-                query,
-                parsed_goal,
-                query_embedding,
-                top_k,
-                memory_type,
-                search_filter,
-                search_priority,
-                user_name,
-                id_filter,
+        if self.use_fulltext:
+            tasks.append(
+                executor.submit(
+                    self._retrieve_from_keyword,
+                    query,
+                    parsed_goal,
+                    query_embedding,
+                    top_k,
+                    memory_type,
+                    search_filter,
+                    search_priority,
+                    user_name,
+                    id_filter,
+                )
             )
-        )
         if search_tool_memory:
             tasks.append(
                 executor.submit(