From 91185eb346430d929eeb6956a28912b18bc2a1e5 Mon Sep 17 00:00:00 2001
From: Jerry Yin
Date: Tue, 9 Dec 2025 17:20:18 +0800
Subject: [PATCH] delete max_tokens

---
 src/server/api/memobase_server/llms/__init__.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/server/api/memobase_server/llms/__init__.py b/src/server/api/memobase_server/llms/__init__.py
index c0c59945..72d5a115 100644
--- a/src/server/api/memobase_server/llms/__init__.py
+++ b/src/server/api/memobase_server/llms/__init__.py
@@ -24,7 +24,6 @@ async def llm_complete(
     history_messages=[],
     json_mode=False,
     model=None,
-    max_tokens=1024,
     **kwargs,
 ) -> Promise[str | dict]:
     use_model = model or CONFIG.best_llm_model
@@ -37,7 +36,6 @@ async def llm_complete(
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
-        max_tokens=max_tokens,
         **kwargs,
     )
     latency = (time.time() - start_time) * 1000
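
Note on the caller-side effect: with the hard-coded max_tokens=1024 default removed, llm_complete no longer imposes an output cap on its own, and a cap only reaches the backend when the caller forwards max_tokens through **kwargs. The sketch below is a minimal, simplified stand-in for illustration only, not the memobase_server implementation; the backend_options dict and the printed output are assumptions made for the example.

    import asyncio


    async def llm_complete(prompt, system_prompt=None, history_messages=[],
                           json_mode=False, model=None, **kwargs):
        # Simplified stand-in: after this patch, max_tokens is not a named
        # parameter, so it is only forwarded when the caller passes it
        # explicitly via **kwargs.
        backend_options = {"model": model or "default-model", **kwargs}
        return f"completed {prompt!r} with options {backend_options}"


    async def main():
        # No cap forwarded; the provider's own default output limit applies.
        print(await llm_complete("summarize the user profile"))
        # Cap forwarded explicitly through **kwargs.
        print(await llm_complete("summarize the user profile", max_tokens=512))


    asyncio.run(main())

Presumably the point is to defer to the provider's default output limit rather than silently truncating completions at 1024 tokens, though the commit message ("delete max_tokens") does not state the motivation.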