Skip to content
12 changes: 2 additions & 10 deletions astrbot/builtin_stars/astrbot/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,16 +100,8 @@ async def decorate_llm_req(self, event: AstrMessageEvent, req: ProviderRequest):
logger.error(f"ltm: {e}")

@filter.on_llm_response()
async def inject_reasoning(self, event: "AstrMessageEvent", resp: "LLMResponse"):
    """Prepend the model's reasoning text to the reply when configured.

    Reads ``provider_settings.display_reasoning_text`` from the per-origin
    config; when enabled and the response carries reasoning content, the
    completion text is rewritten with the reasoning prepended.
    """
    settings = self.context.get_config(event.unified_msg_origin).get(
        "provider_settings", {}
    )
    if not settings.get("display_reasoning_text", False):
        return
    if not resp.reasoning_content:
        return
    resp.completion_text = f"🤔 思考: {resp.reasoning_content}\n\n{resp.completion_text}"

async def record_llm_resp_to_ltm(self, event: AstrMessageEvent, resp: LLMResponse):
"""在 LLM 响应后记录对话"""
if self.ltm and self.ltm_enabled(event):
try:
await self.ltm.after_req_llm(event, resp)
Expand Down
24 changes: 23 additions & 1 deletion astrbot/core/agent/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class ContentPart(BaseModel):

__content_part_registry: ClassVar[dict[str, type["ContentPart"]]] = {}

type: str
type: Literal["text", "think", "image_url", "audio_url"]

def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
Expand Down Expand Up @@ -63,6 +63,28 @@ class TextPart(ContentPart):
text: str


class ThinkPart(ContentPart):
    """A reasoning ("thinking") segment of an assistant message.

    >>> ThinkPart(think="I think I need to think about this.").model_dump()
    {'type': 'think', 'think': 'I think I need to think about this.', 'encrypted': None}
    """

    # Registry discriminator for this content-part subclass.
    type: str = "think"
    # Plain-text reasoning produced by the model.
    think: str
    # Encrypted thinking content, or signature.
    encrypted: str | None = None

    def merge_in_place(self, other: Any) -> bool:
        """Fold *other*'s reasoning text into this part, in place.

        Returns ``True`` when merged. Returns ``False`` when *other* is not a
        ``ThinkPart`` or when this part already carries encrypted content
        (merging would invalidate the existing signature).
        """
        mergeable = isinstance(other, ThinkPart) and not self.encrypted
        if not mergeable:
            return False
        self.think = self.think + other.think
        if other.encrypted:
            self.encrypted = other.encrypted
        return True


class ImageURLPart(ContentPart):
"""
>>> ImageURLPart(image_url="http://example.com/image.jpg").model_dump()
Expand Down
31 changes: 24 additions & 7 deletions astrbot/core/agent/runners/tool_loop_agent_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
)

from astrbot import logger
from astrbot.core.agent.message import TextPart, ThinkPart
from astrbot.core.message.components import Json
from astrbot.core.message.message_event_result import (
MessageChain,
Expand Down Expand Up @@ -169,13 +170,20 @@ async def step(self):
self.final_llm_resp = llm_resp
self._transition_state(AgentState.DONE)
self.stats.end_time = time.time()

# record the final assistant message
self.run_context.messages.append(
Message(
role="assistant",
content=llm_resp.completion_text or "*No response*",
),
)
parts = []
if llm_resp.reasoning_content or llm_resp.reasoning_signature:
parts.append(
ThinkPart(
think=llm_resp.reasoning_content,
encrypted=llm_resp.reasoning_signature,
)
)
parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
self.run_context.messages.append(Message(role="assistant", content=parts))

# call the on_agent_done hook
try:
await self.agent_hooks.on_agent_done(self.run_context, llm_resp)
except Exception as e:
Expand Down Expand Up @@ -214,10 +222,19 @@ async def step(self):
data=AgentResponseData(chain=result),
)
# 将结果添加到上下文中
parts = []
if llm_resp.reasoning_content or llm_resp.reasoning_signature:
parts.append(
ThinkPart(
think=llm_resp.reasoning_content,
encrypted=llm_resp.reasoning_signature,
)
)
parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
tool_calls_result = ToolCallsResult(
tool_calls_info=AssistantMessageSegment(
tool_calls=llm_resp.to_openai_to_calls_model(),
content=llm_resp.completion_text,
content=parts,
),
tool_calls_result=tool_call_result_blocks,
)
Expand Down
6 changes: 6 additions & 0 deletions astrbot/core/astr_agent_hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,12 @@
class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
async def on_agent_done(self, run_context, llm_response):
# 执行事件钩子
if llm_response and llm_response.reasoning_content:
# we will use this in result_decorate stage to inject reasoning content to chain
run_context.context.event.set_extra(
"_llm_reasoning_content", llm_response.reasoning_content
)

await call_event_hook(
run_context.context.event,
EventType.OnLLMResponseEvent,
Expand Down
14 changes: 13 additions & 1 deletion astrbot/core/config/default.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,6 +905,7 @@ class ChatProviderTemplate(TypedDict):
"key": [],
"api_base": "https://api.anthropic.com/v1",
"timeout": 120,
"anth_thinking_config": {"budget": 0},
},
"Moonshot": {
"id": "moonshot",
Expand All @@ -920,7 +921,7 @@ class ChatProviderTemplate(TypedDict):
"xAI": {
"id": "xai",
"provider": "xai",
"type": "openai_chat_completion",
"type": "xai_chat_completion",
"provider_type": "chat_completion",
"enable": True,
"key": [],
Expand Down Expand Up @@ -1787,6 +1788,17 @@ class ChatProviderTemplate(TypedDict):
},
},
},
"anth_thinking_config": {
"description": "Thinking Config",
"type": "object",
"items": {
"budget": {
"description": "Thinking Budget",
"type": "int",
"hint": "Anthropic thinking.budget_tokens param. Must >= 1024. See: https://platform.claude.com/docs/en/build-with-claude/extended-thinking",
},
},
},
"minimax-group-id": {
"type": "string",
"description": "用户组",
Expand Down
Loading