Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 5 additions & 65 deletions astrbot/core/provider/sources/zhipu_source.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from astrbot import logger
from astrbot.core.provider.func_tool_manager import FuncCall
from typing import List
# This file was originally created to adapt to glm-4v-flash, which only supports one image in the context.
# It is no longer specifically adapted to Zhipu's models. To ensure compatibility, this adapter class is retained.


from ..register import register_provider_adapter
from astrbot.core.provider.entities import LLMResponse
from .openai_source import ProviderOpenAIOfficial


@register_provider_adapter("zhipu_chat_completion", "智浦 Chat Completion 提供商适配器")
@register_provider_adapter("zhipu_chat_completion", "智谱 Chat Completion 提供商适配器")
class ProviderZhipu(ProviderOpenAIOfficial):
def __init__(
self,
Expand All @@ -19,63 +19,3 @@ def __init__(
provider_settings,
default_persona,
)

async def text_chat(
    self,
    prompt: str,
    session_id: str = None,
    image_urls: List[str] = None,
    func_tool: FuncCall = None,
    contexts=None,
    system_prompt=None,
    model=None,
    **kwargs,
) -> LLMResponse:
    """Send a chat request, working around glm-4v-flash's single-image limit.

    Assembles the new user record, optionally strips older image-bearing
    history when the target model is ``glm-4v-flash`` (which accepts only one
    image per request), prepends the system prompt, and queries the provider.
    If the provider rejects the request because the context is too long, the
    oldest stored record is popped and the request retried, up to 10 times.

    Args:
        prompt: The user's text prompt.
        session_id: Conversation identifier, used to pop records on retry.
        image_urls: Optional image URLs attached to this turn.
        func_tool: Optional function-calling tool manager forwarded to _query.
        contexts: Prior conversation records (list of role/content dicts).
        system_prompt: Optional system message, inserted at position 0.
        model: Model name override; falls back to ``self.get_model()``.

    Returns:
        The LLMResponse produced by ``self._query``.

    Raises:
        Exception: Re-raises provider errors that are not context-length
            errors, or the last context-length error once retries are spent.
    """
    if contexts is None:
        contexts = []
    new_record = await self.assemble_context(prompt, image_urls)
    # Dead "context_query = []" from the original removed: it was
    # immediately overwritten by this assignment.
    context_query = [*contexts, new_record]

    model_cfgs: dict = self.provider_config.get("model_config", {})
    model = model or self.get_model()
    # glm-4v-flash supports only one image per request: drop every earlier
    # user/assistant pair whose user message carries image content (a
    # list-typed "content"), keeping only the newest record.
    if model.lower() == "glm-4v-flash" and image_urls and len(context_query) > 1:
        logger.debug("glm-4v-flash 只支持一张图片,将只保留最后一张图片")
        logger.debug(context_query)
        filtered = []
        # NOTE(review): assumes history strictly alternates user/assistant
        # pairs — confirm against how contexts are stored upstream.
        for i in range(0, len(context_query) - 1, 2):
            if isinstance(context_query[i].get("content", ""), list):
                continue  # skip the image-bearing pair entirely
            filtered.append(context_query[i])
            filtered.append(context_query[i + 1])
        filtered.append(context_query[-1])  # always keep the latest record
        context_query = filtered
        logger.debug(context_query)

    if system_prompt:
        context_query.insert(0, {"role": "system", "content": system_prompt})

    payloads = {"messages": context_query, **model_cfgs}
    try:
        return await self._query(payloads, func_tool)
    except Exception as e:
        if "maximum context length" not in str(e):
            raise
        # Context too long: pop the oldest stored record and retry.
        # NOTE(review): payloads is not rebuilt after pop_record, so the
        # retry resends the same messages — verify pop_record is expected
        # to influence _query, otherwise retries cannot shrink the request.
        last_err = e
        for _ in range(10):
            logger.warning(
                f"请求失败:{last_err}。上下文长度超过限制。尝试弹出最早的记录然后重试。"
            )
            try:
                self.pop_record(session_id)
                # BUG FIX: the original assigned llm_response and broke out
                # of the loop but never returned it, so a successful retry
                # yielded None to the caller.
                return await self._query(payloads, func_tool)
            except Exception as retry_err:
                if "maximum context length" not in str(retry_err):
                    raise
                last_err = retry_err
        # BUG FIX: the original fell through after exhausting retries and
        # silently returned None; surface the last error instead.
        raise last_err