From 9861ddb808886be3610daf4e589f8f7927c4401a Mon Sep 17 00:00:00 2001 From: Soulter <905617992@qq.com> Date: Thu, 2 Oct 2025 15:58:14 +0800 Subject: [PATCH] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E6=8E=A5=E5=85=A5?= =?UTF-8?q?=E6=99=BA=E8=B0=B1=E6=8F=90=E4=BE=9B=E5=95=86=E5=90=8E=EF=BC=8C?= =?UTF-8?q?=E5=B7=A5=E5=85=B7=E8=B0=83=E7=94=A8=E6=97=A0=E9=99=90=E5=BE=AA?= =?UTF-8?q?=E7=8E=AF=E7=9A=84=E9=97=AE=E9=A2=98=EF=BC=8C=E5=B9=B6=E5=81=9C?= =?UTF-8?q?=E6=AD=A2=E6=94=AF=E6=8C=81=20glm-4v-flash?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fixes: #2912 --- astrbot/core/provider/sources/zhipu_source.py | 70 ++----------------- 1 file changed, 5 insertions(+), 65 deletions(-) diff --git a/astrbot/core/provider/sources/zhipu_source.py b/astrbot/core/provider/sources/zhipu_source.py index cf52e95fc..e7b6ee4f4 100644 --- a/astrbot/core/provider/sources/zhipu_source.py +++ b/astrbot/core/provider/sources/zhipu_source.py @@ -1,12 +1,12 @@ -from astrbot import logger -from astrbot.core.provider.func_tool_manager import FuncCall -from typing import List +# This file was originally created to adapt to glm-4v-flash, which only supports one image in the context. +# It is no longer specifically adapted to Zhipu's models. 
To ensure compatibility, this file is kept as a thin subclass of the OpenAI-compatible provider. + + from ..register import register_provider_adapter -from astrbot.core.provider.entities import LLMResponse from .openai_source import ProviderOpenAIOfficial -@register_provider_adapter("zhipu_chat_completion", "智浦 Chat Completion 提供商适配器") +@register_provider_adapter("zhipu_chat_completion", "智谱 Chat Completion 提供商适配器") class ProviderZhipu(ProviderOpenAIOfficial): def __init__( self, @@ -19,63 +19,3 @@ def __init__( provider_settings, default_persona, ) - - async def text_chat( - self, - prompt: str, - session_id: str = None, - image_urls: List[str] = None, - func_tool: FuncCall = None, - contexts=None, - system_prompt=None, - model=None, - **kwargs, - ) -> LLMResponse: - if contexts is None: - contexts = [] - new_record = await self.assemble_context(prompt, image_urls) - context_query = [] - - context_query = [*contexts, new_record] - - model_cfgs: dict = self.provider_config.get("model_config", {}) - model = model or self.get_model() - # glm-4v-flash 只支持一张图片 - if model.lower() == "glm-4v-flash" and image_urls and len(context_query) > 1: - logger.debug("glm-4v-flash 只支持一张图片,将只保留最后一张图片") - logger.debug(context_query) - new_context_query_ = [] - for i in range(0, len(context_query) - 1, 2): - if isinstance(context_query[i].get("content", ""), list): - continue - new_context_query_.append(context_query[i]) - new_context_query_.append(context_query[i + 1]) - new_context_query_.append(context_query[-1]) # 保留最后一条记录 - context_query = new_context_query_ - logger.debug(context_query) - - if system_prompt: - context_query.insert(0, {"role": "system", "content": system_prompt}) - - payloads = {"messages": context_query, **model_cfgs} - try: - llm_response = await self._query(payloads, func_tool) - return llm_response - except Exception as e: - if "maximum context length" in str(e): - retry_cnt = 10 - while retry_cnt > 0: - logger.warning( - f"请求失败:{e}。上下文长度超过限制。尝试弹出最早的记录然后重试。" - ) - try: - self.pop_record(session_id) - llm_response = await 
self._query(payloads, func_tool) - break - except Exception as e: - if "maximum context length" in str(e): - retry_cnt -= 1 - else: - raise e - else: - raise e