@@ -223,9 +223,30 @@ async def create_message(
         stop_sequences: list[str] | None = None,
         metadata: dict[str, Any] | None = None,
         model_preferences: types.ModelPreferences | None = None,
+        tools: list[types.Tool] | None = None,
+        tool_choice: types.ToolChoice | None = None,
         related_request_id: types.RequestId | None = None,
     ) -> types.CreateMessageResult:
-        """Send a sampling/create_message request."""
+        """Send a sampling/create_message request.
+
+        Args:
+            messages: The conversation messages to send.
+            max_tokens: Maximum number of tokens to generate.
+            system_prompt: Optional system prompt.
+            include_context: Optional context inclusion setting.
+            temperature: Optional sampling temperature.
+            stop_sequences: Optional stop sequences.
+            metadata: Optional metadata to pass through to the LLM provider.
+            model_preferences: Optional model selection preferences.
+            tools: Optional list of tools the LLM can use during sampling.
+                Requires client to have sampling.tools capability.
+            tool_choice: Optional control over tool usage behavior.
+                Requires client to have sampling.tools capability.
+            related_request_id: Optional ID of a related request.
+
+        Returns:
+            The sampling result from the client.
+        """
         return await self.send_request(
             request=types.ServerRequest(
                 types.CreateMessageRequest(
@@ -238,6 +259,8 @@ async def create_message(
                         stopSequences=stop_sequences,
                         metadata=metadata,
                         modelPreferences=model_preferences,
+                        tools=tools,
+                        toolChoice=tool_choice,
                     ),
                 )
             ),
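For reference, a minimal sketch of how a server handler might call the extended signature. The `summarize` helper, the `lookup_term` tool, and the `ToolChoice(type="auto")` constructor are illustrative assumptions, not part of this diff; only the `tools` and `tool_choice` parameters come from the change above, and the actual `types.ToolChoice` shape should be checked against the model it introduces.

```python
# Usage sketch only: assumes `session` is the ServerSession this method
# lives on, and that the connected client declares the sampling.tools
# capability. The ToolChoice constructor shape is a guess.
from mcp import types


async def summarize(session, text: str) -> types.CreateMessageResult:
    # `lookup_term` is a hypothetical tool, purely for illustration.
    lookup_term = types.Tool(
        name="lookup_term",
        description="Look up a domain-specific term before summarizing.",
        inputSchema={
            "type": "object",
            "properties": {"term": {"type": "string"}},
            "required": ["term"],
        },
    )
    return await session.create_message(
        messages=[
            types.SamplingMessage(
                role="user",
                content=types.TextContent(type="text", text=f"Summarize:\n{text}"),
            )
        ],
        max_tokens=512,
        tools=[lookup_term],  # requires sampling.tools capability
        tool_choice=types.ToolChoice(type="auto"),  # assumed field name/value
    )
```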