diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 0e62128ea..b550e5d42 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -56,7 +56,7 @@ jobs: uses: actions/checkout@v4 - name: Install Dependencies - run: uv sync + run: uv sync --all-extras - name: Run testcase env: diff --git a/.gitignore b/.gitignore index 338baef8a..fe8a527a2 100644 --- a/.gitignore +++ b/.gitignore @@ -173,6 +173,8 @@ cython_debug/ # PyPI configuration file .pypirc +.vscode/ + **/uipath.db **/.uipath diff --git a/pyproject.toml b/pyproject.toml index 3bd56bf73..62e28e51c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "uipath-langchain" -version = "0.5.66" +version = "1.0.0" description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform" readme = { file = "README.md", content-type = "text/markdown" } requires-python = ">=3.11" @@ -10,7 +10,6 @@ dependencies = [ "langgraph>=1.0.0, <2.0.0", "langchain-core>=1.2.11, <2.0.0", "langgraph-checkpoint-sqlite>=3.0.3, <4.0.0", - "langchain-openai>=1.0.0, <2.0.0", "langchain>=1.0.0, <2.0.0", "pydantic-settings>=2.6.0", "python-dotenv>=1.0.1", @@ -20,6 +19,7 @@ dependencies = [ "jsonpath-ng>=1.7.0", "mcp==1.26.0", "langchain-mcp-adapters==0.2.1", + "uipath-langchain-client[openai]>=1.1.7", ] classifiers = [ @@ -35,8 +35,10 @@ maintainers = [ ] [project.optional-dependencies] -vertex = ["langchain-google-genai>=2.0.0", "google-generativeai>=0.8.0"] -bedrock = ["langchain-aws>=0.2.35", "boto3-stubs>=1.41.4"] +bedrock = ["uipath-langchain-client[aws]>=1.1.7"] +anthropic = ["uipath-langchain-client[anthropic]>=1.1.7"] +vertex = ["uipath-langchain-client[google,vertexai]>=1.1.7"] +all = ["uipath-langchain-client[all]>=1.1.7"] [project.entry-points."uipath.middlewares"] register = "uipath_langchain.middlewares:register_middleware" @@ -82,8 +84,6 @@ select = ["E", "F", "B", "I"] [tool.ruff.lint.per-file-ignores] "*" = ["E501"] -"src/uipath_langchain/chat/bedrock.py" = ["E402"] -"src/uipath_langchain/chat/vertex.py" = ["E402"] [tool.ruff.format] quote-style = "double" diff --git a/samples/bring-your-own-model/main.py b/samples/bring-your-own-model/main.py index b92d31d02..631d6a8a0 100644 --- a/samples/bring-your-own-model/main.py +++ b/samples/bring-your-own-model/main.py @@ -1,8 +1,9 @@ from langchain_core.messages import HumanMessage, SystemMessage -from langgraph.graph import START, StateGraph, END -from uipath_langchain.chat import UiPathChatOpenAI +from langgraph.graph import END, START, StateGraph from pydantic import BaseModel +from uipath_langchain.chat import UiPathChatOpenAI + class GraphState(BaseModel): topic: str @@ -11,11 +12,11 @@ class GraphState(BaseModel): class GraphOutput(BaseModel): report: str + async def generate_report(state: GraphState) -> GraphOutput: system_prompt = "You are a report generator. Please provide a brief report based on the given topic." 
llm = UiPathChatOpenAI( - byo_connection_id="my-custom-model", - model_name="gpt-4o-2024-11-20" + byo_connection_id="my-custom-model", model_name="gpt-4o-2024-11-20" ) output = await llm.ainvoke( [SystemMessage(system_prompt), HumanMessage(state.topic)] diff --git a/samples/chat-uipath-agent/graph.py b/samples/chat-uipath-agent/graph.py index d0bec324b..2ac3f59fc 100644 --- a/samples/chat-uipath-agent/graph.py +++ b/samples/chat-uipath-agent/graph.py @@ -1,7 +1,8 @@ from langchain.agents import create_agent -from uipath_langchain.chat import UiPathChatOpenAI from langchain_community.tools import DuckDuckGoSearchResults +from uipath_langchain.chat import UiPathChatOpenAI + search_tool = DuckDuckGoSearchResults() movie_system_prompt = """You are an advanced AI assistant specializing in movie research and analysis. Your primary functions are: diff --git a/samples/email-organizer-agent/src/email_organizer/main.py b/samples/email-organizer-agent/src/email_organizer/main.py index aa0561f1d..cc1fe5deb 100644 --- a/samples/email-organizer-agent/src/email_organizer/main.py +++ b/samples/email-organizer-agent/src/email_organizer/main.py @@ -1,19 +1,21 @@ import logging import os import re +from difflib import SequenceMatcher +from typing import Dict, List, Optional + +from langgraph.graph import END, START, StateGraph +from langgraph.types import Command, interrupt from pydantic import BaseModel, Field -from typing import List, Dict, Optional -from langgraph.graph import StateGraph, START, END -from langgraph.types import interrupt, Command from uipath.platform import UiPath -from uipath_langchain.chat import UiPathChat from uipath.platform.common import CreateTask + from email_organizer.outlook_client import OutlookClient -from difflib import SequenceMatcher +from uipath_langchain.chat import UiPathChat # Configuration DEFAULT_CONFIDENCE = 0.0 -USER = 'me' +USER = "me" MAX_EMAILS_TO_ANALYZE = 50 MAX_RULES_TO_CREATE = 5 @@ -21,12 +23,14 @@ uipath = UiPath() + class Email(BaseModel): id: str subject: str sender: str preview: str = "" + class Rule(BaseModel): id: str = "" rule_name: str @@ -38,40 +42,50 @@ class Rule(BaseModel): isEnabled: bool = True rule_type: str = "" + class llmRule(BaseModel): rule_name: str = Field(description="The unique identifier for the rule") - conditions: Dict = Field(default={}, description="Conditions must have this form {'predicate': ['value1', 'value2']}") + conditions: Dict = Field( + default={}, + description="Conditions must have this form {'predicate': ['value1', 'value2']}", + ) target_folder: str = Field(description="FolderName") reasoning: str = Field(description="Why this rule is useful") rule_type: str = Field(description="NEW or IMPROVED") + class RuleSuggestions(BaseModel): """Container for multiple rule suggestions from LLM""" + rules: List[llmRule] = Field(description="List of email rule suggestions") + class GraphInput(BaseModel): max_emails: int max_rules: int assignee: Optional[str] = None + class GraphOutput(BaseModel): success: bool rules_created: int message: str + class GraphState(BaseModel): model_config = {"arbitrary_types_allowed": True} emails: List[Email] = [] rules: List[Rule] = [] suggestions: List[Rule] = [] - folders: Dict[str, str] = {} # folder name to ID mapping + folders: Dict[str, str] = {} # folder name to ID mapping access_token: str = "" human_approved: bool = False outlook_client: Optional[OutlookClient] = None max_emails: int = MAX_EMAILS_TO_ANALYZE # From GraphInput - max_rules: int = MAX_RULES_TO_CREATE # From GraphInput - 
assignee: Optional[str] = None # From GraphInput + max_rules: int = MAX_RULES_TO_CREATE # From GraphInput + assignee: Optional[str] = None # From GraphInput + async def get_access_token(input_config: GraphInput) -> Command: """Get access token for Microsoft Graph API and initialize OutlookClient""" @@ -90,7 +104,7 @@ async def get_access_token(input_config: GraphInput) -> Command: "output": GraphOutput( success=False, rules_created=0, - message="Failed to obtain access token" + message="Failed to obtain access token", ) } ) @@ -101,9 +115,9 @@ async def get_access_token(input_config: GraphInput) -> Command: update={ "access_token": access_token, "outlook_client": outlook_client, - "max_emails": input_config.max_emails, # Pass from input - "max_rules": input_config.max_rules, # Pass from input - "assignee": input_config.assignee # Pass from input + "max_emails": input_config.max_emails, # Pass from input + "max_rules": input_config.max_rules, # Pass from input + "assignee": input_config.assignee, # Pass from input } ) except Exception as e: @@ -113,11 +127,12 @@ async def get_access_token(input_config: GraphInput) -> Command: "output": GraphOutput( success=False, rules_created=0, - message=f"Error retrieving access token: {e}" + message=f"Error retrieving access token: {e}", ) } ) + async def fetch_emails(state: GraphState) -> Command: """Fetch emails from inbox using OutlookClient""" try: @@ -135,8 +150,10 @@ async def fetch_emails(state: GraphState) -> Command: email = Email( id=item.get("id", ""), subject=item.get("subject", "No Subject"), - sender=item.get("from", {}).get("emailAddress", {}).get("address", "Unknown Sender"), - preview=item.get("bodyPreview", "") + sender=item.get("from", {}) + .get("emailAddress", {}) + .get("address", "Unknown Sender"), + preview=item.get("bodyPreview", ""), ) emails.append(email) except Exception as e: @@ -145,22 +162,23 @@ async def fetch_emails(state: GraphState) -> Command: logger.info(f"Fetched {len(emails)} emails from inbox") - return Command( - update={ - "emails": emails - } - ) + return Command(update={"emails": emails}) except Exception as e: logger.error(f"Error fetching emails: {e}") return Command( update={ - "output": GraphOutput(success=False, rules_created=0, message=f"Error fetching emails: {e}") + "output": GraphOutput( + success=False, + rules_created=0, + message=f"Error fetching emails: {e}", + ) } ) + async def fetch_folders(state: GraphState) -> Command: - """Fetch all mail folders """ + """Fetch all mail folders""" try: if not state.outlook_client: raise Exception("OutlookClient not initialized") @@ -171,20 +189,21 @@ async def fetch_folders(state: GraphState) -> Command: logger.info(f"Fetched {len(folders)} folders") logger.info(f"All available folders: {list(folders.keys())}") - return Command( - update={ - "folders": folders - } - ) + return Command(update={"folders": folders}) except Exception as e: logger.error(f"Error fetching folders: {e}") return Command( update={ - "output": GraphOutput(success=False, rules_created=0, message=f"Error fetching folders: {e}") + "output": GraphOutput( + success=False, + rules_created=0, + message=f"Error fetching folders: {e}", + ) } ) + async def fetch_rules(state: GraphState) -> Command: """Extract just moveToFolder actions from existing rules in Outlook""" try: @@ -219,7 +238,7 @@ async def fetch_rules(state: GraphState) -> Command: target_folder=target_folder, sequence=item.get("sequence", 1), isEnabled=item.get("isEnabled", True), - rule_type="EXISTING" + rule_type="EXISTING", ) 
rules.append(rule) except Exception as e: @@ -228,65 +247,72 @@ async def fetch_rules(state: GraphState) -> Command: logger.info(f"Fetched {len(rules)} existing rules from Outlook") - return Command( - update={ - "rules": rules - } - ) + return Command(update={"rules": rules}) except Exception as e: logger.error(f"Error fetching rules: {e}") return Command( update={ - "output": GraphOutput(success=False, rules_created=0, message=f"Error fetching rules: {e}") + "output": GraphOutput( + success=False, rules_created=0, message=f"Error fetching rules: {e}" + ) } ) + def _infer_conditions_from_rule(llm_rule: llmRule, emails: List[Email]) -> Dict: """Infer rule conditions from rule name, reasoning, and email patterns""" conditions = {} rule_text = f"{llm_rule.rule_name} {llm_rule.reasoning}".lower() - email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b' + email_pattern = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b" emails_mentioned = re.findall(email_pattern, llm_rule.reasoning, re.IGNORECASE) if emails_mentioned: - conditions['senderContains'] = emails_mentioned + conditions["senderContains"] = emails_mentioned else: # Try to infer from common patterns in rule names - if 'azure' in rule_text: - conditions['senderContains'] = ['azure', 'microsoft.com'] - elif 'teams' in rule_text: - conditions['senderContains'] = ['teams.mail.microsoft'] - elif 'sheerid' in rule_text: - conditions['senderContains'] = ['sheerid.com'] - elif 'upb' in rule_text or 'elearning' in rule_text: - conditions['senderContains'] = ['upb.ro', 'curs.upb.ro'] - elif 'grid' in rule_text or 'university' in rule_text: - conditions['senderContains'] = ['grid.pub.ro'] + if "azure" in rule_text: + conditions["senderContains"] = ["azure", "microsoft.com"] + elif "teams" in rule_text: + conditions["senderContains"] = ["teams.mail.microsoft"] + elif "sheerid" in rule_text: + conditions["senderContains"] = ["sheerid.com"] + elif "upb" in rule_text or "elearning" in rule_text: + conditions["senderContains"] = ["upb.ro", "curs.upb.ro"] + elif "grid" in rule_text or "university" in rule_text: + conditions["senderContains"] = ["grid.pub.ro"] else: # Fallback: analyze actual emails to find patterns target_folder = llm_rule.target_folder.lower() sender_patterns = [] for email in emails: - if any(keyword in email.sender.lower() for keyword in target_folder.split()): - sender_patterns.append(email.sender.split('@')[-1] if '@' in email.sender else email.sender) + if any( + keyword in email.sender.lower() for keyword in target_folder.split() + ): + sender_patterns.append( + email.sender.split("@")[-1] + if "@" in email.sender + else email.sender + ) if sender_patterns: # Take most common domain from collections import Counter + most_common = Counter(sender_patterns).most_common(1) if most_common: - conditions['senderContains'] = [most_common[0][0]] + conditions["senderContains"] = [most_common[0][0]] # If no conditions found, provide a generic fallback if not conditions: - conditions = {'senderContains': [llm_rule.target_folder.lower()]} + conditions = {"senderContains": [llm_rule.target_folder.lower()]} return conditions + async def llm_node(state: GraphState) -> Command: """Generate rule suggestions using LLM analysis of emails""" try: @@ -364,8 +390,7 @@ async def llm_node(state: GraphState) -> Command: Analyze the email patterns and create the most valuable organizational rules. 
""" - - llm = UiPathChat() + llm = UiPathChat(model="gpt-4o-mini-2024-07-18") structured_llm = llm.with_structured_output(RuleSuggestions) response = await structured_llm.ainvoke(prompt) @@ -373,28 +398,37 @@ async def llm_node(state: GraphState) -> Command: suggestions = [] - if hasattr(response, 'rules'): + if hasattr(response, "rules"): rules_list = response.rules - elif isinstance(response, dict) and 'rules' in response: - rules_list = response['rules'] - rules_list = [llmRule(**rule) if isinstance(rule, dict) else rule for rule in rules_list] + elif isinstance(response, dict) and "rules" in response: + rules_list = response["rules"] + rules_list = [ + llmRule(**rule) if isinstance(rule, dict) else rule + for rule in rules_list + ] elif isinstance(response, dict): rules_list = [llmRule(**response)] elif isinstance(response, list): - rules_list = [llmRule(**rule) if isinstance(rule, dict) else rule for rule in response] + rules_list = [ + llmRule(**rule) if isinstance(rule, dict) else rule for rule in response + ] else: - logger.warning(f"Unexpected response format: {type(response)}, content: {response}") + logger.warning( + f"Unexpected response format: {type(response)}, content: {response}" + ) rules_list = [] for idx, llm_rule in enumerate(rules_list): if isinstance(llm_rule, dict): rule_dict = llm_rule.copy() - if 'conditions' not in rule_dict: - rule_dict['conditions'] = {} + if "conditions" not in rule_dict: + rule_dict["conditions"] = {} try: llm_rule = llmRule(**rule_dict) except Exception as e: - logger.warning(f"Failed to create llmRule from dict {rule_dict}: {e}") + logger.warning( + f"Failed to create llmRule from dict {rule_dict}: {e}" + ) continue conditions = llm_rule.conditions @@ -412,32 +446,37 @@ async def llm_node(state: GraphState) -> Command: ) suggestions.append(rule_suggestion) - logger.info(f"Generated {len(suggestions)} rule suggestions using structured output") - - return Command( - update={ - "suggestions": suggestions - } + logger.info( + f"Generated {len(suggestions)} rule suggestions using structured output" ) + return Command(update={"suggestions": suggestions}) + except Exception as e: logger.error(f"Error generating rule suggestions: {e}") return Command( update={ - "output": GraphOutput(success=False, rules_created=0, message=f"Error generating suggestions: {e}") + "output": GraphOutput( + success=False, + rules_created=0, + message=f"Error generating suggestions: {e}", + ) } ) + async def wait_for_human_approval(state: GraphState) -> Command: """Wait for human approval before proceeding with rule creation""" # Format the suggestions for display - suggestions_text = "\n" + "="*60 + "\n" + suggestions_text = "\n" + "=" * 60 + "\n" suggestions_text += "EMAIL RULE SUGGESTIONS\n" - suggestions_text += "="*60 + "\n" + suggestions_text += "=" * 60 + "\n" for idx, suggestion in enumerate(state.suggestions, 1): - suggestions_text += f"\n{idx}.{suggestion.rule_type} Rule: {suggestion.rule_name}\n" + suggestions_text += ( + f"\n{idx}.{suggestion.rule_type} Rule: {suggestion.rule_name}\n" + ) suggestions_text += f" Target Folder: {suggestion.target_folder}\n" suggestions_text += " Conditions:\n" @@ -449,7 +488,9 @@ async def wait_for_human_approval(state: GraphState) -> Command: # Add explanation based on rule type if suggestion.rule_type == "IMPROVED": - suggestions_text += " This will enhance an existing rule with additional conditions\n" + suggestions_text += ( + " This will enhance an existing rule with additional conditions\n" + ) else: suggestions_text += " 
This will create a new rule\n" @@ -458,14 +499,15 @@ async def wait_for_human_approval(state: GraphState) -> Command: improved_rules = len([r for r in state.suggestions if r.rule_type == "IMPROVED"]) # Get unique target folders that don't exist yet - suggested_folders = set(r.target_folder for r in state.suggestions if r.rule_type == "NEW") + suggested_folders = set( + r.target_folder for r in state.suggestions if r.rule_type == "NEW" + ) existing_folders = set(state.folders.keys()) new_folders_needed = suggested_folders - existing_folders existing_folders_used = suggested_folders & existing_folders - - suggestions_text += f"\n" + "="*60 + "\n" - suggestions_text += f"SUMMARY:\n" + suggestions_text += "\n" + "=" * 60 + "\n" + suggestions_text += "SUMMARY:\n" suggestions_text += f"• {new_rules} new rules will be created\n" suggestions_text += f"• {improved_rules} existing rules will be improved\n" suggestions_text += f"• Total suggestions: {len(state.suggestions)}\n" @@ -477,12 +519,13 @@ async def wait_for_human_approval(state: GraphState) -> Command: suggestions_text += f" • {folder}\n" if existing_folders_used: - suggestions_text += f"\nEXISTING FOLDERS TO BE USED ({len(existing_folders_used)}):\n" + suggestions_text += ( + f"\nEXISTING FOLDERS TO BE USED ({len(existing_folders_used)}):\n" + ) for folder in sorted(existing_folders_used): suggestions_text += f" • {folder}\n" - - suggestions_text += "="*60 + "\n" + suggestions_text += "=" * 60 + "\n" suggestions_text += "\n Do you want to proceed with creating these rules?\n" suggestions_text += "Select 'true' to create the rules\n" suggestions_text += "Select 'false' to cancel\n" @@ -491,31 +534,32 @@ async def wait_for_human_approval(state: GraphState) -> Command: logger.info("Displaying suggestions to user for approval...") logger.info(suggestions_text) - action_data = interrupt(CreateTask( - app_name="escalation_agent_app", - title="Email Rule Suggestions - Approval Required", - data={ - "AgentOutput": suggestions_text, - "AgentName": "Email Organization Assistant" - }, - app_version=1, - assignee=state.assignee, # Use assignee from input - app_folder_path=os.getenv("FOLDER_PATH_PLACEHOLDER") - )) + action_data = interrupt( + CreateTask( + app_name="escalation_agent_app", + title="Email Rule Suggestions - Approval Required", + data={ + "AgentOutput": suggestions_text, + "AgentName": "Email Organization Assistant", + }, + app_version=1, + assignee=state.assignee, # Use assignee from input + app_folder_path=os.getenv("FOLDER_PATH_PLACEHOLDER"), + ) + ) # uncomment this to use regular cli --resume # action_data = interrupt("\nAgent output:\n" + suggestions_text + "\n\nDo you approve these email rule suggestions? 
(Yes/No)") logger.info(f"Action data received: {action_data}") # Wait for human approval - human_approved = isinstance(action_data.get("Answer"), bool) and action_data["Answer"] is True - - return Command( - update={ - "human_approved": human_approved - } + human_approved = ( + isinstance(action_data.get("Answer"), bool) and action_data["Answer"] is True ) + return Command(update={"human_approved": human_approved}) + + def conditions_overlap(cond1, cond2): """Check if two condition dicts have overlapping values.""" for key in cond1: @@ -550,32 +594,56 @@ async def create_rules(state: GraphState) -> Command: # Process each rule suggestion for idx, suggestion in enumerate(state.suggestions, 1): - logger.info(f"Processing rule {idx}/{len(state.suggestions)}: {suggestion.rule_name} ({suggestion.rule_type})") + logger.info( + f"Processing rule {idx}/{len(state.suggestions)}: {suggestion.rule_name} ({suggestion.rule_type})" + ) try: if suggestion.rule_type == "IMPROVED": # Try to match by name first - matched_rule = next((r for r in existing_rules if r.rule_name == suggestion.rule_name), None) + matched_rule = next( + ( + r + for r in existing_rules + if r.rule_name == suggestion.rule_name + ), + None, + ) # If not found, match by folder and overlapping conditions if not matched_rule: for r in existing_rules: folder_match = r.target_folder == suggestion.target_folder - cond_match = conditions_overlap(r.conditions, suggestion.conditions) - name_similarity = SequenceMatcher(None, r.rule_name, suggestion.rule_name).ratio() + cond_match = conditions_overlap( + r.conditions, suggestion.conditions + ) + name_similarity = SequenceMatcher( + None, r.rule_name, suggestion.rule_name + ).ratio() if folder_match and cond_match and name_similarity > 0.5: matched_rule = r break if matched_rule: logger.info(f"Updating existing rule: {matched_rule.rule_name}") - folder_id = state.folders.get(suggestion.target_folder, matched_rule.actions.get("moveToFolder", "")) + folder_id = state.folders.get( + suggestion.target_folder, + matched_rule.actions.get("moveToFolder", ""), + ) if not folder_id: - errors.append(f"No folder ID found for improved rule {suggestion.rule_name}") + errors.append( + f"No folder ID found for improved rule {suggestion.rule_name}" + ) continue try: - await state.outlook_client._delete(f"mailFolders/inbox/messageRules/{matched_rule.id}") - logger.info(f"Deleted existing rule: {matched_rule.rule_name}") + await state.outlook_client._delete( + f"mailFolders/inbox/messageRules/{matched_rule.id}" + ) + logger.info( + f"Deleted existing rule: {matched_rule.rule_name}" + ) except Exception as e: - logger.warning(f"Could not delete existing rule {matched_rule.rule_name}: {e}") + logger.warning( + f"Could not delete existing rule {matched_rule.rule_name}: {e}" + ) rule_data = { "displayName": suggestion.rule_name, "sequence": matched_rule.sequence, @@ -583,18 +651,26 @@ async def create_rules(state: GraphState) -> Command: "conditions": suggestion.conditions, "actions": { "moveToFolder": folder_id, - "stopProcessingRules": False - } + "stopProcessingRules": False, + }, } - rule_id = await state.outlook_client.create_message_rule(rule_data) + rule_id = await state.outlook_client.create_message_rule( + rule_data + ) if rule_id: rules_updated += 1 logger.info(f"Updated rule: {suggestion.rule_name}") else: - errors.append(f"Failed to update rule {suggestion.rule_name}") + errors.append( + f"Failed to update rule {suggestion.rule_name}" + ) else: - logger.warning(f"Cannot improve rule 
'{suggestion.rule_name}' - no similar existing rule found") - errors.append(f"Cannot improve rule '{suggestion.rule_name}' - no similar existing rule found") + logger.warning( + f"Cannot improve rule '{suggestion.rule_name}' - no similar existing rule found" + ) + errors.append( + f"Cannot improve rule '{suggestion.rule_name}' - no similar existing rule found" + ) elif suggestion.rule_type == "NEW": # Handle NEW rule - create folder if needed, then create rule folder_name = suggestion.target_folder @@ -610,7 +686,9 @@ async def create_rules(state: GraphState) -> Command: else: # Create new folder logger.info(f"Creating new folder: {folder_name}") - folder_id = await state.outlook_client.create_folder(folder_name) + folder_id = await state.outlook_client.create_folder( + folder_name + ) if folder_id: created_folders[folder_name] = folder_id @@ -632,11 +710,13 @@ async def create_rules(state: GraphState) -> Command: "conditions": suggestion.conditions, "actions": { "moveToFolder": folder_id, - "stopProcessingRules": False - } + "stopProcessingRules": False, + }, } - logger.info(f"Creating new rule: {suggestion.rule_name} -> {folder_name}") + logger.info( + f"Creating new rule: {suggestion.rule_name} -> {folder_name}" + ) rule_id = await state.outlook_client.create_message_rule(rule_data) if rule_id: @@ -646,11 +726,16 @@ async def create_rules(state: GraphState) -> Command: errors.append(f"Failed to create rule {suggestion.rule_name}") else: - logger.warning(f"Unknown rule type '{suggestion.rule_type}' for rule {suggestion.rule_name}") - errors.append(f"Unknown rule type '{suggestion.rule_type}' for rule {suggestion.rule_name}") + logger.warning( + f"Unknown rule type '{suggestion.rule_type}' for rule {suggestion.rule_name}" + ) + errors.append( + f"Unknown rule type '{suggestion.rule_type}' for rule {suggestion.rule_name}" + ) # Rate limiting to avoid API throttling import asyncio + await asyncio.sleep(0.5) except Exception as e: @@ -659,7 +744,7 @@ async def create_rules(state: GraphState) -> Command: continue # Log final summary - logger.info(f"FINAL SUMMARY:") + logger.info("FINAL SUMMARY:") logger.info(f" • Created {len(created_folders)} new folders") logger.info(f" • Created {rules_created} new rules") logger.info(f" • Updated {rules_updated} existing rules") @@ -677,7 +762,7 @@ async def create_rules(state: GraphState) -> Command: "output": GraphOutput( success=success, rules_created=total_rules, - message=f"Created {len(created_folders)} folders, {rules_created} new rules, updated {rules_updated} existing rules. {len(errors)} errors encountered." + message=f"Created {len(created_folders)} folders, {rules_created} new rules, updated {rules_updated} existing rules. 
{len(errors)} errors encountered.", ) } ) @@ -689,11 +774,12 @@ async def create_rules(state: GraphState) -> Command: "output": GraphOutput( success=False, rules_created=0, - message=f"Error creating/updating rules: {e}" + message=f"Error creating/updating rules: {e}", ) } ) + def build_graph() -> StateGraph: """Build and compile the email organization graph.""" builder = StateGraph(GraphState, input=GraphInput, output=GraphOutput) @@ -714,18 +800,24 @@ def build_graph() -> StateGraph: builder.add_edge("fetch_folders", "fetch_rules") builder.add_edge("fetch_rules", "llm_analysis") builder.add_edge("llm_analysis", "wait_for_approval") + def should_create_rules(state: GraphState) -> str: return "create_rules" if state.human_approved else "END" + builder.add_conditional_edges( "wait_for_approval", should_create_rules, - {"create_rules": "create_rules", "END": END} + {"create_rules": "create_rules", "END": END}, ) builder.add_edge("create_rules", END) from langgraph.checkpoint.memory import MemorySaver + checkpointer = MemorySaver() - return builder.compile(checkpointer=checkpointer, interrupt_before=["wait_for_approval"]) + return builder.compile( + checkpointer=checkpointer, interrupt_before=["wait_for_approval"] + ) + graph = build_graph() diff --git a/samples/joke-agent/graph.py b/samples/joke-agent/graph.py index 85025cae3..a14edeef3 100644 --- a/samples/joke-agent/graph.py +++ b/samples/joke-agent/graph.py @@ -3,18 +3,18 @@ from langchain.agents import create_agent from langchain_core.messages import HumanMessage from langchain_core.tools import tool -from langgraph.constants import START, END +from langgraph.constants import END, START from langgraph.graph import StateGraph +from middleware import CustomFilterAction, LoggingMiddleware from pydantic import BaseModel from uipath.core.guardrails import GuardrailScope -from middleware import CustomFilterAction, LoggingMiddleware from uipath_langchain.chat import UiPathChat from uipath_langchain.guardrails import ( BlockAction, - PIIDetectionEntity, GuardrailExecutionStage, LogAction, + PIIDetectionEntity, PIIDetectionEntityType, UiPathDeterministicGuardrailMiddleware, UiPathPIIDetectionMiddleware, @@ -26,11 +26,13 @@ # Define input schema for the agent class Input(BaseModel): """Input schema for the joke agent.""" + topic: str class Output(BaseModel): """Output schema for the joke agent.""" + joke: str @@ -57,6 +59,7 @@ def analyze_joke_syntax(joke: str) -> str: return f"Words number: {word_count}\nLetters: {letter_count}" + # System prompt based on agent1.json SYSTEM_PROMPT = """You are an AI assistant designed to generate family-friendly jokes. 
Your process is as follows: @@ -140,7 +143,7 @@ def analyze_joke_syntax(joke: str) -> str: ), stage=GuardrailExecutionStage.POST, name="Joke Content Always Filter", - ) + ), ], ) @@ -150,7 +153,9 @@ async def joke_node(state: Input) -> Output: """Convert topic to messages, call agent, and extract joke.""" # Convert topic to messages format messages = [ - HumanMessage(content=f"Generate a family-friendly joke based on the topic: {state.topic}") + HumanMessage( + content=f"Generate a family-friendly joke based on the topic: {state.topic}" + ) ] # Call the agent with messages diff --git a/samples/oauth-external-apps-agent/main.py b/samples/oauth-external-apps-agent/main.py index 97fd32ce9..8d9802a58 100644 --- a/samples/oauth-external-apps-agent/main.py +++ b/samples/oauth-external-apps-agent/main.py @@ -1,21 +1,21 @@ import os -import dotenv -import httpx from contextlib import asynccontextmanager -from typing import Optional, Literal +from typing import Literal, Optional -from pydantic import BaseModel -from langgraph.graph import StateGraph, START, END -from langgraph.types import Command +import dotenv +import httpx from langchain.agents import create_agent -from langchain.messages import SystemMessage, HumanMessage - -from uipath_langchain.chat.models import UiPathChat +from langchain.messages import HumanMessage, SystemMessage from langchain_mcp_adapters.tools import load_mcp_tools +from langgraph.graph import END, START, StateGraph +from langgraph.types import Command from mcp import ClientSession from mcp.client.streamable_http import streamablehttp_client +from pydantic import BaseModel from uipath.platform import UiPath +from uipath_langchain.chat import UiPathChat + dotenv.load_dotenv() UIPATH_CLIENT_ID = "EXTERNAL_APP_CLIENT_ID_HERE" @@ -24,17 +24,21 @@ UIPATH_URL = "base_url" UIPATH_MCP_SERVER_URL = os.getenv("UIPATH_MCP_SERVER_URL") + class GraphInput(BaseModel): task: str + class GraphOutput(BaseModel): result: str + class State(BaseModel): task: str access_token: Optional[str] = os.getenv("UIPATH_ACCESS_TOKEN") result: Optional[str] = None + async def fetch_new_access_token(state: State) -> Command: try: UiPath( @@ -48,6 +52,7 @@ async def fetch_new_access_token(state: State) -> Command: except Exception as e: raise Exception(f"Failed to initialize UiPath SDK: {str(e)}") + @asynccontextmanager async def agent_mcp(access_token: str): async with streamablehttp_client( @@ -62,28 +67,37 @@ async def agent_mcp(access_token: str): agent = create_agent(model, tools=tools) yield agent + async def connect_to_mcp(state: State) -> Command: try: async with agent_mcp(state.access_token) as agent: - agent_response = await agent.ainvoke({ - "messages": [ - SystemMessage(content="You are a helpful assistant."), - HumanMessage(content=state.task), - ], - }) + agent_response = await agent.ainvoke( + { + "messages": [ + SystemMessage(content="You are a helpful assistant."), + HumanMessage(content=state.task), + ], + } + ) return Command(update={"result": agent_response["messages"][-1].content}) except ExceptionGroup as e: for error in e.exceptions: - if isinstance(error, httpx.HTTPStatusError) and error.response.status_code == 401: + if ( + isinstance(error, httpx.HTTPStatusError) + and error.response.status_code == 401 + ): return Command(update={"access_token": None}) raise + def route_start(state: State) -> Literal["fetch_new_access_token", "connect_to_mcp"]: return "fetch_new_access_token" if state.access_token is None else "connect_to_mcp" + def route_after_connect(state: State): return 
"fetch_new_access_token" if state.access_token is None else END + builder = StateGraph(State, input=GraphInput, output=GraphOutput) builder.add_node("fetch_new_access_token", fetch_new_access_token) builder.add_node("connect_to_mcp", connect_to_mcp) diff --git a/samples/retrieval-chain/main.py b/samples/retrieval-chain/main.py index afdf9f41a..65669e5fc 100644 --- a/samples/retrieval-chain/main.py +++ b/samples/retrieval-chain/main.py @@ -11,14 +11,17 @@ from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_core.vectorstores import VectorStore -from uipath_langchain.chat.models import UiPathAzureChatOpenAI + +from uipath_langchain.chat import UiPathAzureChatOpenAI from uipath_langchain.vectorstores.context_grounding_vectorstore import ( ContextGroundingVectorStore, ) + @dataclass class MainInput: """Input parameters for the main function.""" + query: str index_name: str k: int @@ -67,17 +70,16 @@ def retrieval_chain(query: str) -> dict[str, Any]: async def main(input_data: MainInput): - """Run a simple example of ContextGroundingVectorStore.""" - vectorstore = ContextGroundingVectorStore( - index_name=input_data.index_name - ) + vectorstore = ContextGroundingVectorStore(index_name=input_data.index_name) # Use query from input query = input_data.query # Perform semantic searches with distance scores - docs_with_scores = await vectorstore.asimilarity_search_with_score(query=query, k=input_data.k) + docs_with_scores = await vectorstore.asimilarity_search_with_score( + query=query, k=input_data.k + ) print("==== Docs with distance scores ====") pprint( [ @@ -88,7 +90,9 @@ async def main(input_data: MainInput): # Perform a similarity search with relevance scores docs_with_relevance_scores = ( - await vectorstore.asimilarity_search_with_relevance_scores(query=query, k=input_data.k) + await vectorstore.asimilarity_search_with_relevance_scores( + query=query, k=input_data.k + ) ) print("==== Docs with relevance scores ====") pprint( @@ -100,6 +104,7 @@ async def main(input_data: MainInput): # Run a retrieval chain model = UiPathAzureChatOpenAI( + model="gpt-4o-mini-2024-07-18", max_retries=3, ) diff --git a/src/uipath_langchain/_utils/__init__.py b/src/uipath_langchain/_utils/__init__.py deleted file mode 100644 index 1902d7308..000000000 --- a/src/uipath_langchain/_utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from ._request_mixin import UiPathRequestMixin - -__all__ = ["UiPathRequestMixin"] diff --git a/src/uipath_langchain/_utils/_request_mixin.py b/src/uipath_langchain/_utils/_request_mixin.py deleted file mode 100644 index b787ee715..000000000 --- a/src/uipath_langchain/_utils/_request_mixin.py +++ /dev/null @@ -1,806 +0,0 @@ -# mypy: disable-error-code="no-redef,arg-type" -import json -import logging -import os -import time -from typing import Any, AsyncIterator, Dict, Iterator, Mapping - -import httpx -import openai -from langchain_core.embeddings import Embeddings -from langchain_core.language_models.chat_models import _cleanup_llm_representation -from langchain_core.messages import AIMessageChunk -from langchain_core.messages.ai import UsageMetadata -from langchain_core.outputs import ChatGenerationChunk -from pydantic import BaseModel, ConfigDict, Field, SecretStr, ValidationError -from tenacity import ( - AsyncRetrying, - Retrying, - retry_if_exception_type, - stop_after_attempt, - wait_exponential_jitter, -) -from uipath._utils._ssl_context import get_httpx_client_kwargs -from uipath.runtime.errors import ( - 
UiPathErrorCategory, - UiPathErrorCode, - UiPathRuntimeError, -) - -from uipath_langchain._utils._settings import ( - UiPathClientFactorySettings, - UiPathClientSettings, - get_uipath_token_header, -) -from uipath_langchain._utils._sleep_policy import before_sleep_log -from uipath_langchain.runtime.errors import ( - LangGraphErrorCode, - LangGraphRuntimeError, -) - - -def get_from_uipath_url(): - url = os.getenv("UIPATH_URL") - if url: - return "/".join(url.split("/", 3)[:3]) - return None - - -def _get_access_token(data): - """Get access token from settings, environment variables, or UiPath client factory.""" - token = ( - getattr(data["settings"], "access_token", None) - or os.getenv("UIPATH_ACCESS_TOKEN") - or os.getenv("UIPATH_SERVICE_TOKEN") - ) - - if token: - return token - - try: - settings = UiPathClientFactorySettings( - UIPATH_BASE_URL=data["base_url"], - UIPATH_CLIENT_ID=data["client_id"], - UIPATH_CLIENT_SECRET=data["client_secret"], - ) - return get_uipath_token_header(settings) - except ValidationError: - raise UiPathRuntimeError( - UiPathErrorCode.EXECUTION_ERROR, - title="Authorization required", - detail="Authorization required. Please run uipath auth", - category=UiPathErrorCategory.USER, - ) from None - - -class UiPathRequestMixin(BaseModel): - model_config = ConfigDict(arbitrary_types_allowed=True) - - default_headers: Mapping[str, str] | None = { - "X-UiPath-Streaming-Enabled": "false", - "X-UiPath-JobKey": os.getenv("UIPATH_JOB_KEY", ""), - "X-UiPath-ProcessKey": os.getenv("UIPATH_PROCESS_KEY", ""), - } - model_name: str | None = Field( - default_factory=lambda: os.getenv( - "UIPATH_MODEL_NAME", "gpt-4.1-mini-2025-04-14" - ), - alias="model", - ) - settings: UiPathClientSettings | None = None - client_id: str | None = Field(default_factory=lambda: os.getenv("UIPATH_CLIENT_ID")) - client_secret: str | None = Field( - default_factory=lambda: os.getenv("UIPATH_CLIENT_SECRET") - ) - base_url: str | None = Field( - default_factory=lambda data: ( - getattr(data["settings"], "base_url", None) - or os.getenv("UIPATH_BASE_URL") - or get_from_uipath_url() - ), - alias="azure_endpoint", - ) - access_token: str | None = Field( - default_factory=lambda data: _get_access_token(data) - ) - - org_id: Any = Field( - default_factory=lambda data: ( - getattr(data["settings"], "org_id", None) - or os.getenv("UIPATH_ORGANIZATION_ID", "") - ) - ) - tenant_id: Any = Field( - default_factory=lambda data: ( - getattr(data["settings"], "tenant_id", None) - or os.getenv("UIPATH_TENANT_ID", "") - ) - ) - requesting_product: Any = Field( - default_factory=lambda data: ( - getattr(data["settings"], "requesting_product", None) - or os.getenv("UIPATH_REQUESTING_PRODUCT", "uipath-python-sdk") - ) - ) - requesting_feature: Any = Field( - default_factory=lambda data: ( - getattr(data["settings"], "requesting_feature", None) - or os.getenv("UIPATH_REQUESTING_FEATURE", "langgraph-agent") - ) - ) - default_request_timeout: Any = Field( - default_factory=lambda data: float( - getattr(data["settings"], "timeout_seconds", None) - or os.getenv("UIPATH_TIMEOUT_SECONDS", "120") - ), - alias="timeout", - ) - - openai_api_version: str | None = Field( - default_factory=lambda: os.getenv("OPENAI_API_VERSION", "2024-08-01-preview"), - alias="api_version", - ) - include_account_id: bool = False - temperature: float | None = 0.0 - max_tokens: int | None = 1000 - frequency_penalty: float | None = None - presence_penalty: float | None = None - agenthub_config: str | None = None - byo_connection_id: str | None = None - 
- logger: logging.Logger | None = None - max_retries: int | None = 5 - base_delay: float = 5.0 - max_delay: float = 60.0 - - _url: str | None = None - _auth_headers: dict[str, str] | None = None - - # required to instantiate AzureChatOpenAI subclasses - azure_endpoint: str | None = Field( - default="placeholder", description="Bypassed Azure endpoint" - ) - openai_api_key: SecretStr | None = Field( - default=SecretStr("placeholder"), description="Bypassed API key" - ) - # required to instatiate ChatAnthropic subclasses (will be needed when passthrough is implemented for Anthropic models) - stop_sequences: list[str] | None = Field( - default=None, description="Bypassed stop sequence" - ) - - def _request( - self, url: str, request_body: dict[str, Any], headers: dict[str, str] - ) -> dict[str, Any]: - """Run an asynchronous call to the LLM.""" - # if self.logger: - # self.logger.info(f"Completion request: {request_body['messages'][:2]}") - client_kwargs = get_httpx_client_kwargs() - with httpx.Client( - **client_kwargs, # Apply SSL configuration - event_hooks={ - "request": [self._log_request_duration], - "response": [self._log_response_duration], - }, - ) as client: - response = client.post( - url, - headers=headers, - json=request_body, - timeout=self.default_request_timeout, - ) - - # Handle HTTP errors and map them to OpenAI exceptions - try: - content = response.content # Read content to avoid closed stream issues - print(f"Response content: {content.decode('utf-8')}") - response.raise_for_status() - except httpx.HTTPStatusError as err: - if self.logger: - self.logger.error( - "Error querying UiPath: %s (%s)", - err.response.reason_phrase, - err.response.status_code, - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - raise self._make_status_error_from_response(err.response) from err - - return response.json() - - def _call( - self, url: str, request_body: dict[str, Any], headers: dict[str, str] - ) -> dict[str, Any]: - """Run a synchronous call with retries to LLM""" - if self.max_retries is None: - return self._request(url, request_body, headers) - - retryer = Retrying( - stop=stop_after_attempt(self.max_retries), - wait=wait_exponential_jitter( - initial=self.base_delay, - max=self.max_delay, - jitter=1.0, - ), - retry=retry_if_exception_type( - (openai.RateLimitError, httpx.TimeoutException) - ), - reraise=True, - before_sleep=before_sleep_log(self.logger, logging.WARNING) - if self.logger is not None - else None, - ) - - try: - return retryer(self._request, url, request_body, headers) - # return self._request(url, request_body, headers) - except openai.APIStatusError as err: - if self.logger: - self.logger.error( - "Failed querying LLM after retries: %s", - err, - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - raise err - - async def _arequest( - self, url: str, request_body: dict[str, Any], headers: dict[str, str] - ) -> dict[str, Any]: - # if self.logger: - # self.logger.info(f"Completion request: {request_body['messages'][:2]}") - client_kwargs = get_httpx_client_kwargs() - async with httpx.AsyncClient( - **client_kwargs, # Apply SSL configuration - event_hooks={ - "request": [self._alog_request_duration], - "response": [self._alog_response_duration], - }, - ) as client: - response = await client.post( - url, - headers=headers, - json=request_body, - timeout=self.default_request_timeout, - ) - # Handle HTTP errors 
and map them to OpenAI exceptions - try: - response.raise_for_status() - except httpx.HTTPStatusError as err: - if self.logger: - self.logger.error( - "Error querying LLM: %s (%s)", - err.response.reason_phrase, - err.response.status_code, - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - raise self._make_status_error_from_response(err.response) from err - - return response.json() - - async def _acall( - self, url: str, request_body: dict[str, Any], headers: dict[str, str] - ) -> dict[str, Any]: - """Run an asynchronous call with retries to the LLM.""" - if self.max_retries is None: - return await self._arequest(url, request_body, headers) - - retryer = AsyncRetrying( - stop=stop_after_attempt(self.max_retries), - wait=wait_exponential_jitter( - initial=self.base_delay, - max=self.max_delay, - jitter=1.0, - ), - retry=retry_if_exception_type( - (openai.RateLimitError, httpx.TimeoutException) - ), - reraise=True, - before_sleep=before_sleep_log(self.logger, logging.WARNING) - if self.logger is not None - else None, - ) - - try: - response: Any = await retryer(self._arequest, url, request_body, headers) - if self.logger: - self.logger.info( - f"[uipath_langchain_client] Finished retryer after {retryer.statistics['attempt_number'] - 1} retries", - extra={ - "retry": f"{retryer.statistics['attempt_number'] - 1}", - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else { - "retry": f"{retryer.statistics['attempt_number'] - 1}", - }, - ) - return response - except openai.APIStatusError as err: - if self.logger: - self.logger.error( - "[uipath_langchain_client] Failed querying LLM after retries: %s", - err, - extra={ - "reason": err.message, - "statusCode": err.status_code, - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else { - "reason": err.message, - "statusCode": err.status_code, - }, - ) - raise err - - def _convert_chunk( - self, - chunk: Dict[str, Any], - default_chunk_class: type, - include_tool_calls: bool = False, - ) -> ChatGenerationChunk | None: - """Convert a streaming chunk to a ChatGenerationChunk. 
- - Args: - chunk: The raw SSE chunk dictionary - default_chunk_class: The default message chunk class to use - include_tool_calls: Whether to parse and include tool call chunks - - Returns: - A ChatGenerationChunk or None if the chunk should be skipped - """ - - token_usage = chunk.get("usage") - choices = chunk.get("choices", []) - - usage_metadata: UsageMetadata | None = None - if token_usage: - usage_metadata = UsageMetadata( - input_tokens=token_usage.get("prompt_tokens", 0), - output_tokens=token_usage.get("completion_tokens", 0), - total_tokens=token_usage.get("total_tokens", 0), - ) - - if len(choices) == 0: - return ChatGenerationChunk( - message=default_chunk_class(content="", usage_metadata=usage_metadata), - generation_info={}, - ) - - choice = choices[0] - delta = choice.get("delta") - if delta is None: - return None - - # Extract content from delta - content = delta.get("content", "") - - # Build the message chunk - message_kwargs = { - "content": content or "", - "usage_metadata": usage_metadata, - } - - # Handle tool calls if requested (for normalized API) - if include_tool_calls: - tool_calls = delta.get("tool_calls", []) - tool_call_chunks = [] - if tool_calls: - for tc in tool_calls: - # Tool call structure: {'function': {'name': '...', 'arguments': '...'}, 'id': '...', 'index': 0} - function = tc.get("function", {}) - tool_call_chunks.append( - { - "id": tc.get("id"), - "name": function.get("name"), - "args": function.get("arguments", ""), - "index": tc.get("index", 0), - } - ) - if tool_call_chunks: - message_kwargs["tool_call_chunks"] = tool_call_chunks - - message_chunk = AIMessageChunk(**message_kwargs) - - generation_info = {} - if finish_reason := choice.get("finish_reason"): - generation_info["finish_reason"] = finish_reason - if model_name := chunk.get("model"): - generation_info["model_name"] = model_name - - return ChatGenerationChunk( - message=message_chunk, - generation_info=generation_info or None, - ) - - def _stream_request( - self, url: str, request_body: Dict[str, Any], headers: Dict[str, str] - ) -> Iterator[Dict[str, Any]]: - """Stream SSE responses from the LLM.""" - client_kwargs = get_httpx_client_kwargs() - with httpx.Client( - **client_kwargs, - event_hooks={ - "request": [self._log_request_duration], - "response": [self._log_response_duration], - }, - ) as client: - with client.stream( - "POST", - url, - headers=headers, - json=request_body, - timeout=self.default_request_timeout, - ) as response: - try: - response.raise_for_status() - except httpx.HTTPStatusError as err: - if self.logger: - self.logger.error( - "Error querying UiPath: %s (%s)", - err.response.reason_phrase, - err.response.status_code, - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - # Read the response body for streaming responses - err.response.read() - raise self._make_status_error_from_response(err.response) from err - - for line in response.iter_lines(): - line = line.strip() - if not line: - continue - - if self.logger: - self.logger.debug(f"[SSE] Raw line: {line}") - - if line.startswith("data:"): - data = line[ - 5: - ].strip() # Remove "data:" prefix and strip whitespace - if data == "[DONE]": - break - if not data: # Skip empty data lines - continue - try: - parsed = json.loads(data) - # Skip empty chunks (some APIs send them as keepalive) - # Check for truly empty: empty id AND (no choices or empty choices list) - if (not parsed.get("id") or parsed.get("id") == "") and ( - not 
parsed.get("choices") - or len(parsed.get("choices", [])) == 0 - ): - if self.logger: - self.logger.debug( - "[SSE] Skipping empty keepalive chunk" - ) - continue - yield parsed - except json.JSONDecodeError as e: - if self.logger: - self.logger.warning( - f"Failed to parse SSE chunk: {data}, error: {e}" - ) - continue - else: - # Handle lines without "data: " prefix (some APIs send raw JSON) - try: - parsed = json.loads(line) - if self.logger: - self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}") - yield parsed - except json.JSONDecodeError: - # Not JSON, skip - pass - - async def _astream_request( - self, url: str, request_body: Dict[str, Any], headers: Dict[str, str] - ) -> AsyncIterator[Dict[str, Any]]: - """Async stream SSE responses from the LLM.""" - client_kwargs = get_httpx_client_kwargs() - async with httpx.AsyncClient( - **client_kwargs, - event_hooks={ - "request": [self._alog_request_duration], - "response": [self._alog_response_duration], - }, - ) as client: - async with client.stream( - "POST", - url, - headers=headers, - json=request_body, - timeout=self.default_request_timeout, - ) as response: - try: - response.raise_for_status() - except httpx.HTTPStatusError as err: - if self.logger: - self.logger.error( - "Error querying LLM: %s (%s)", - err.response.reason_phrase, - err.response.status_code, - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - # Read the response body for streaming responses - await err.response.aread() - raise self._make_status_error_from_response(err.response) from err - - async for line in response.aiter_lines(): - line = line.strip() - if not line: - continue - - if self.logger: - self.logger.debug(f"[SSE] Raw line: {line}") - - if line.startswith("data:"): - data = line[ - 5: - ].strip() # Remove "data:" prefix and strip whitespace - if data == "[DONE]": - break - if not data: # Skip empty data lines - continue - try: - parsed = json.loads(data) - # Skip empty chunks (some APIs send them as keepalive) - # Check for truly empty: empty id AND (no choices or empty choices list) - if (not parsed.get("id") or parsed.get("id") == "") and ( - not parsed.get("choices") - or len(parsed.get("choices", [])) == 0 - ): - if self.logger: - self.logger.debug( - "[SSE] Skipping empty keepalive chunk" - ) - continue - yield parsed - except json.JSONDecodeError as e: - if self.logger: - self.logger.warning( - f"Failed to parse SSE chunk: {data}, error: {e}" - ) - continue - else: - # Handle lines without "data: " prefix (some APIs send raw JSON) - try: - parsed = json.loads(line) - if self.logger: - self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}") - yield parsed - except json.JSONDecodeError: - # Not JSON, skip - pass - - def _make_status_error_from_response( - self, - response: httpx.Response, - ) -> openai.APIStatusError: - """Function reproduced from openai._client to handle UiPath errors.""" - if response.is_closed and not response.is_stream_consumed: - # We can't read the response body as it has been closed - # before it was read. This can happen if an event hook - # raises a status error. 
- body = None - err_msg = f"Error code: {response.status_code}" - else: - err_text = response.text.strip() - body = err_text - - try: - body = json.loads(err_text) - err_msg = f"Error code: {response.status_code} - {body}" - except Exception: - err_msg = err_text or f"Error code: {response.status_code}" - - return self._make_status_error(err_msg, body=body, response=response) - - def _make_status_error( - self, - err_msg: str, - *, - body: object, - response: httpx.Response, - ) -> openai.APIStatusError: - """Function reproduced from openai._client to handle UiPath errors.""" - data = body.get("error", body) if isinstance(body, Mapping) else body - if response.status_code == 400: - return openai.BadRequestError(err_msg, response=response, body=data) - - if response.status_code == 401: - return openai.AuthenticationError(err_msg, response=response, body=data) - - if response.status_code == 403: - # Check if this is a license-specific error - if isinstance(body, dict): - title = body.get("title", "").lower() - if title == "license not available": - raise LangGraphRuntimeError( - code=LangGraphErrorCode.LICENSE_NOT_AVAILABLE, - title=body.get("title", "License Not Available"), - detail=body.get( - "detail", "License not available for this service" - ), - category=UiPathErrorCategory.DEPLOYMENT, - ) - - return openai.PermissionDeniedError(err_msg, response=response, body=data) - - if response.status_code == 404: - return openai.NotFoundError(err_msg, response=response, body=data) - - if response.status_code == 409: - return openai.ConflictError(err_msg, response=response, body=data) - - if response.status_code == 422: - return openai.UnprocessableEntityError( - err_msg, response=response, body=data - ) - - if response.status_code == 429: - return openai.RateLimitError(err_msg, response=response, body=data) - - if response.status_code >= 500: - return openai.InternalServerError(err_msg, response=response, body=data) - return openai.APIStatusError(err_msg, response=response, body=data) - - def _log_request_duration(self, request: httpx.Request): - """Log the start time of the request.""" - if self.logger: - request.extensions["start_time"] = time.monotonic() - - def _log_response_duration(self, response: httpx.Response): - """Log the duration of the request.""" - if self.logger: - start_time = response.request.extensions.get("start_time") - if start_time: - duration = time.monotonic() - start_time - type = "embedding" - if not isinstance(self, Embeddings): - type = "normalized" if self.is_normalized else "completion" - self.logger.info( - f"[uipath_langchain_client] Request to {response.request.url} took {duration:.2f} seconds.", - extra={ - "requestUrl": f"{response.request.url}", - "duration": f"{duration:.2f}", - "type": type, - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else { - "requestUrl": f"{response.request.url}", - "duration": f"{duration:.2f}", - "type": type, - }, - ) - - async def _alog_request_duration(self, request: httpx.Request): - """Log the start time of the request.""" - self._log_request_duration(request) - - async def _alog_response_duration(self, response: httpx.Response): - """Log the duration of the request.""" - self._log_response_duration(response) - - @property - def _llm_type(self) -> str: - """Get the type of language model used by this chat model. 
Used for logging purposes only.""" - return "uipath" - - @property - def _identifying_params(self) -> dict[str, Any]: - return { - "url": self.url, - "model": self.model_name, - "temperature": self.temperature, - "max_tokens": self.max_tokens, - "frequency_penalty": self.frequency_penalty, - "presence_penalty": self.presence_penalty, - } - - def _prepare_url(self, url: str) -> httpx.URL: - return httpx.URL(self.url) - - def _build_headers(self, options, retries_taken: int = 0) -> httpx.Headers: - return httpx.Headers(self.auth_headers) - - @property - def url(self) -> str: - if not self._url: - env_uipath_url = os.getenv("UIPATH_URL") - - if env_uipath_url: - self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}" - else: - self._url = ( - f"{self.base_url}/{self.org_id}/{self.tenant_id}/{self.endpoint}" - ) - return self._url - - @property - def endpoint(self) -> str: - raise NotImplementedError( - "The endpoint property is not implemented for this class." - ) - - @property - def auth_headers(self) -> dict[str, str]: - if not self._auth_headers: - self._auth_headers = { - **self.default_headers, # type: ignore - "Authorization": f"Bearer {self.access_token}", - "X-UiPath-LlmGateway-TimeoutSeconds": str(self.default_request_timeout), - } - if self.agenthub_config: - self._auth_headers["X-UiPath-AgentHub-Config"] = self.agenthub_config - if self.byo_connection_id: - self._auth_headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = ( - self.byo_connection_id - ) - if self.is_normalized and self.model_name: - self._auth_headers["X-UiPath-LlmGateway-NormalizedApi-ModelName"] = ( - self.model_name - ) - if self.include_account_id: - self._auth_headers["x-uipath-internal-accountid"] = self.org_id - self._auth_headers["x-uipath-internal-tenantid"] = self.tenant_id - return self._auth_headers - - def _get_llm_string(self, stop: list[str] | None = None, **kwargs: Any) -> str: - serialized_repr = getattr(self, "_serialized", self.model_dump()) - _cleanup_llm_representation(serialized_repr, 1) - kwargs = serialized_repr.get("kwargs", serialized_repr) - for key in [ - "base_url", - "access_token", - "client_id", - "client_secret", - "org_id", - "tenant_id", - "requesting_product", - "requesting_feature", - "azure_endpoint", - "openai_api_version", - "openai_api_key", - "default_request_timeout", - "max_retries", - "base_delay", - "max_delay", - "logger", - "settings", - ]: - if key in kwargs: - kwargs.pop(key, None) - llm_string = json.dumps(serialized_repr, sort_keys=True) - return llm_string - - @property - def is_normalized(self) -> bool: - return False diff --git a/src/uipath_langchain/_utils/_settings.py b/src/uipath_langchain/_utils/_settings.py deleted file mode 100644 index ac9676017..000000000 --- a/src/uipath_langchain/_utils/_settings.py +++ /dev/null @@ -1,87 +0,0 @@ -# mypy: disable-error-code="syntax" -import os -from typing import Any - -import httpx -from pydantic import Field -from pydantic_settings import BaseSettings -from uipath._utils._ssl_context import get_httpx_client_kwargs - - -class UiPathCachedPathsSettings(BaseSettings): - cached_completion_db: str = Field( - default=os.path.join( - os.path.dirname(__file__), "tests", "tests_uipath_cache.db" - ), - alias="CACHED_COMPLETION_DB", - ) - cached_embeddings_dir: str = Field( - default=os.path.join(os.path.dirname(__file__), "tests", "cached_embeddings"), - alias="CACHED_EMBEDDINGS_DIR", - ) - - -uipath_cached_paths_settings = UiPathCachedPathsSettings() -uipath_token_header: str | None = None - - -class 
UiPathClientFactorySettings(BaseSettings): - base_url: str = Field(default="", alias="UIPATH_BASE_URL") - client_id: str = Field(default="", alias="UIPATH_CLIENT_ID") - client_secret: str = Field(default="", alias="UIPATH_CLIENT_SECRET") - - -class UiPathClientSettings(BaseSettings): - access_token: str = Field(default_factory=lambda: get_uipath_token_header()) - base_url: str = Field(default="", alias="UIPATH_BASE_URL") - org_id: str = Field(default="", alias="UIPATH_ORGANIZATION_ID") - tenant_id: str = Field(default="", alias="UIPATH_TENANT_ID") - requesting_product: str = Field( - default="uipath-python-sdk", alias="UIPATH_REQUESTING_PRODUCT" - ) - requesting_feature: str = Field( - default="langgraph-agent", alias="UIPATH_REQUESTING_FEATURE" - ) - timeout_seconds: str = Field(default="120", alias="UIPATH_TIMEOUT_SECONDS") - action_name: str = Field(default="DefaultActionName", alias="UIPATH_ACTION_NAME") - action_id: str = Field(default="DefaultActionId", alias="UIPATH_ACTION_ID") - - -def get_uipath_token_header( - settings: Any = None, -) -> str: - global uipath_token_header - if not uipath_token_header: - settings = settings or UiPathClientFactorySettings() - url_get_token = f"{settings.base_url}/identity_/connect/token" - token_credentials = dict( - client_id=settings.client_id, - client_secret=settings.client_secret, - grant_type="client_credentials", - ) - with httpx.Client(**get_httpx_client_kwargs()) as client: - res = client.post(url_get_token, data=token_credentials) - res_json = res.json() - uipath_token_header = res_json.get("access_token") - - return uipath_token_header or "" - - -async def get_token_header_async( - settings: Any = None, -) -> str: - global uipath_token_header - if not uipath_token_header: - settings = settings or UiPathClientFactorySettings() - url_get_token = f"{settings.base_url}/identity_/connect/token" - token_credentials = dict( - client_id=settings.client_id, - client_secret=settings.client_secret, - grant_type="client_credentials", - ) - - with httpx.Client(**get_httpx_client_kwargs()) as client: - res_json = client.post(url_get_token, data=token_credentials).json() - uipath_token_header = res_json.get("access_token") - - return uipath_token_header or "" diff --git a/src/uipath_langchain/_utils/_sleep_policy.py b/src/uipath_langchain/_utils/_sleep_policy.py deleted file mode 100644 index c059c7964..000000000 --- a/src/uipath_langchain/_utils/_sleep_policy.py +++ /dev/null @@ -1,41 +0,0 @@ -import logging -from typing import Callable - -from tenacity import ( - RetryCallState, - _utils, -) - - -def before_sleep_log( - logger: "logging.Logger", - log_level: int, - exc_info: bool = False, -) -> Callable[["RetryCallState"], None]: - """Before call strategy that logs to some logger the attempt.""" - - def log_it(retry_state: "RetryCallState") -> None: - if retry_state.outcome is None: - raise RuntimeError("log_it() called before outcome was set") - - if retry_state.next_action is None: - raise RuntimeError("log_it() called before next_action was set") - - if retry_state.outcome.failed: - ex = retry_state.outcome.exception() - verb, value = "raised", f"{ex.__class__.__name__}: {ex}" - else: - verb, value = "returned", retry_state.outcome.result() - - if retry_state.fn is None: - fn_name = "" - else: - fn_name = _utils.get_callback_name(retry_state.fn) - - logger.log( - log_level, - f"Retrying #{retry_state.attempt_number} {fn_name} in {retry_state.next_action.sleep} seconds as it {verb} {value}.", - {"retries": f"{retry_state.attempt_number}"}, - ) - - 
return log_it diff --git a/src/uipath_langchain/agent/react/agent.py b/src/uipath_langchain/agent/react/agent.py index 2e183695b..2e284748e 100644 --- a/src/uipath_langchain/agent/react/agent.py +++ b/src/uipath_langchain/agent/react/agent.py @@ -7,13 +7,8 @@ from langgraph.graph import StateGraph from pydantic import BaseModel from uipath.platform.guardrails import BaseGuardrail -from uipath.runtime.errors import UiPathErrorCategory -from uipath_langchain.agent.exceptions import ( - AgentStartupError, - AgentStartupErrorCode, -) -from uipath_langchain.chat.types import UiPathPassthroughChatModel +from uipath_langchain.chat import UiPathBaseLLMClient from ..guardrails.actions import GuardrailAction from .guardrails.guardrails_subgraph import ( @@ -70,18 +65,13 @@ def create_agent( """ from ..tools import create_tool_node - if not isinstance(model, UiPathPassthroughChatModel): - raise AgentStartupError( - code=AgentStartupErrorCode.LLM_INVALID_MODEL, - title=f"Model {type(model).__name__} does not implement UiPathPassthroughChatModel.", - detail="The model must have llm_provider and api_flavor properties.", - category=UiPathErrorCategory.SYSTEM, + if isinstance(model, UiPathBaseLLMClient): + agent_settings = AgentSettings( + llm_provider=model.api_config.vendor_type, + api_flavor=model.api_config.api_flavor, ) - - agent_settings = AgentSettings( - llm_provider=model.llm_provider, - api_flavor=model.api_flavor, - ) + else: + agent_settings = None if config is None: config = AgentGraphConfig() diff --git a/src/uipath_langchain/agent/react/llm_node.py b/src/uipath_langchain/agent/react/llm_node.py index 551ea0914..0244183a2 100644 --- a/src/uipath_langchain/agent/react/llm_node.py +++ b/src/uipath_langchain/agent/react/llm_node.py @@ -65,7 +65,6 @@ def create_llm_node( """ bindable_tools = list(tools) if tools else [] payload_handler = get_payload_handler(model) - tool_choice_required_value = payload_handler.get_required_tool_choice() async def llm_node(state: StateT): messages: list[AnyMessage] = state.messages @@ -83,19 +82,15 @@ async def llm_node(state: StateT): static_schema_tools = _apply_tool_argument_properties( bindable_tools, state, input_schema ) - parallel_kwargs = payload_handler.get_parallel_tool_calls_kwargs( - enable_openai_parallel_tool_calls - ) - base_llm = model.bind_tools(static_schema_tools, **parallel_kwargs) if ( not is_conversational and bindable_tools and consecutive_thinking_messages >= thinking_messages_limit ): - llm = base_llm.bind(tool_choice=tool_choice_required_value) + llm = model.bind_tools(static_schema_tools, tool_choice="any") else: - llm = base_llm + llm = model.bind_tools(static_schema_tools) response = await llm.ainvoke(messages) if not isinstance(response, AIMessage): @@ -107,7 +102,8 @@ async def llm_node(state: StateT): category=UiPathErrorCategory.SYSTEM, ) - payload_handler.check_stop_reason(response) + if payload_handler is not None: + payload_handler.check_stop_reason(response) # filter out flow control tools when multiple tool calls exist if response.tool_calls: diff --git a/src/uipath_langchain/chat/__init__.py b/src/uipath_langchain/chat/__init__.py index 69e0e2b98..625c9db11 100644 --- a/src/uipath_langchain/chat/__init__.py +++ b/src/uipath_langchain/chat/__init__.py @@ -13,16 +13,20 @@ def __getattr__(name): - if name == "UiPathAzureChatOpenAI": - from .models import UiPathAzureChatOpenAI + if name == "UiPathBaseLLMClient": + from uipath_langchain_client.base_client import UiPathBaseLLMClient - return UiPathAzureChatOpenAI + return 
UiPathBaseLLMClient if name == "UiPathChat": - from .models import UiPathChat + from uipath_langchain_client.clients.normalized import UiPathNormalizedChatModel - return UiPathChat + return UiPathNormalizedChatModel + if name == "UiPathAzureChatOpenAI": + from uipath_langchain_client.clients.openai import UiPathAzureChatOpenAI + + return UiPathAzureChatOpenAI if name == "UiPathChatOpenAI": - from .openai import UiPathChatOpenAI + from uipath_langchain_client.clients.openai import UiPathChatOpenAI return UiPathChatOpenAI if name == "requires_approval": @@ -33,18 +37,43 @@ def __getattr__(name): from . import supported_models return getattr(supported_models, name) - if name in ("LLMProvider", "APIFlavor", "UiPathPassthroughChatModel"): + if name in ("LLMProvider", "APIFlavor"): from . import types return getattr(types, name) + if name == "UiPathChatBedrock": + from uipath_langchain_client.clients.bedrock import UiPathChatBedrock + + return UiPathChatBedrock + if name == "UiPathChatBedrockConverse": + from uipath_langchain_client.clients.bedrock import UiPathChatBedrockConverse + + return UiPathChatBedrockConverse + if name == "UiPathChatGoogleGenerativeAI": + from uipath_langchain_client.clients.google import UiPathChatGoogleGenerativeAI + + return UiPathChatGoogleGenerativeAI + if name == "UiPathChatAnthropic": + from uipath_langchain_client.clients.anthropic import UiPathChatAnthropic + + return UiPathChatAnthropic + if name == "UiPathChatAnthropicVertex": + from uipath_langchain_client.clients.vertexai import UiPathChatAnthropicVertex + + return UiPathChatAnthropicVertex raise AttributeError(f"module {__name__!r} has no attribute {name!r}") __all__ = [ + "UiPathBaseLLMClient", "UiPathChat", "UiPathAzureChatOpenAI", "UiPathChatOpenAI", - "UiPathPassthroughChatModel", + "UiPathChatBedrock", + "UiPathChatBedrockConverse", + "UiPathChatGoogleGenerativeAI", + "UiPathChatAnthropic", + "UiPathChatAnthropicVertex", "OpenAIModels", "BedrockModels", "GeminiModels", diff --git a/src/uipath_langchain/chat/bedrock.py b/src/uipath_langchain/chat/bedrock.py deleted file mode 100644 index a29631deb..000000000 --- a/src/uipath_langchain/chat/bedrock.py +++ /dev/null @@ -1,348 +0,0 @@ -import logging -import os -from collections.abc import Iterator -from typing import Any, Optional - -from langchain_core.callbacks import CallbackManagerForLLMRun -from langchain_core.messages import BaseMessage -from langchain_core.outputs import ChatGenerationChunk, ChatResult -from tenacity import AsyncRetrying, Retrying -from uipath._utils import resource_override -from uipath.utils import EndpointManager - -from .header_capture import HeaderCapture -from .retryers.bedrock import AsyncBedrockRetryer, BedrockRetryer -from .supported_models import BedrockModels -from .types import APIFlavor, LLMProvider - -logger = logging.getLogger(__name__) - - -def _check_bedrock_dependencies() -> None: - """Check if required dependencies for UiPathChatBedrock are installed.""" - import importlib.util - - missing_packages = [] - - if importlib.util.find_spec("langchain_aws") is None: - missing_packages.append("langchain-aws") - - if importlib.util.find_spec("boto3") is None: - missing_packages.append("boto3") - - if missing_packages: - packages_str = ", ".join(missing_packages) - raise ImportError( - f"The following packages are required to use UiPathChatBedrock: {packages_str}\n" - "Please install them using one of the following methods:\n\n" - " # Using pip:\n" - f" pip install uipath-langchain[bedrock]\n\n" - " # Using uv:\n" - f" uv 
add 'uipath-langchain[bedrock]'\n\n" - ) - - -_check_bedrock_dependencies() - -import boto3 -import botocore.config -from langchain_aws import ( - ChatBedrock, - ChatBedrockConverse, -) - - -class AwsBedrockCompletionsPassthroughClient: - @resource_override( - resource_identifier="byo_connection_id", resource_type="connection" - ) - def __init__( - self, - model: str, - token: str, - api_flavor: str, - agenthub_config: Optional[str] = None, - byo_connection_id: Optional[str] = None, - header_capture: HeaderCapture | None = None, - ): - self.model = model - self.token = token - self.api_flavor = api_flavor - self.agenthub_config = agenthub_config - self.byo_connection_id = byo_connection_id - self._vendor = "awsbedrock" - self._url: Optional[str] = None - self.header_capture = header_capture - - @property - def endpoint(self) -> str: - vendor_endpoint = EndpointManager.get_vendor_endpoint() - formatted_endpoint = vendor_endpoint.format( - vendor=self._vendor, - model=self.model, - ) - return formatted_endpoint - - def _build_base_url(self) -> str: - if not self._url: - env_uipath_url = os.getenv("UIPATH_URL") - - if env_uipath_url: - self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}" - else: - raise ValueError("UIPATH_URL environment variable is required") - - return self._url - - def _capture_response_headers(self, parsed, model, **kwargs): - if "ResponseMetadata" in parsed: - headers = parsed["ResponseMetadata"].get("HTTPHeaders", {}) - if self.header_capture: - self.header_capture.set(dict(headers)) - - def get_client(self): - client = boto3.client( - "bedrock-runtime", - region_name="none", - aws_access_key_id="none", - aws_secret_access_key="none", - config=botocore.config.Config( - retries={ - "total_max_attempts": 1, - } - ), - ) - client.meta.events.register( - "before-send.bedrock-runtime.*", self._modify_request - ) - client.meta.events.register( - "after-call.bedrock-runtime.*", self._capture_response_headers - ) - return client - - def _modify_request(self, request, **kwargs): - """Intercept boto3 request and redirect to LLM Gateway""" - # Detect streaming based on URL suffix: - # - converse-stream / invoke-with-response-stream -> streaming - # - converse / invoke -> non-streaming - streaming = "true" if request.url.endswith("-stream") else "false" - request.url = self._build_base_url() - - headers = { - "Authorization": f"Bearer {self.token}", - "X-UiPath-LlmGateway-ApiFlavor": self.api_flavor, - "X-UiPath-Streaming-Enabled": streaming, - } - - if self.agenthub_config: - headers["X-UiPath-AgentHub-Config"] = self.agenthub_config - if self.byo_connection_id: - headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = self.byo_connection_id - job_key = os.getenv("UIPATH_JOB_KEY") - process_key = os.getenv("UIPATH_PROCESS_KEY") - if job_key: - headers["X-UiPath-JobKey"] = job_key - if process_key: - headers["X-UiPath-ProcessKey"] = process_key - - request.headers.update(headers) - - -class UiPathChatBedrockConverse(ChatBedrockConverse): - llm_provider: LLMProvider = LLMProvider.BEDROCK - api_flavor: APIFlavor = APIFlavor.AWS_BEDROCK_CONVERSE - model: str = "" # For tracing serialization - retryer: Optional[Retrying] = None - aretryer: Optional[AsyncRetrying] = None - - def __init__( - self, - org_id: Optional[str] = None, - tenant_id: Optional[str] = None, - token: Optional[str] = None, - model_name: str = BedrockModels.anthropic_claude_haiku_4_5, - agenthub_config: Optional[str] = None, - byo_connection_id: Optional[str] = None, - retryer: Optional[Retrying] = None, - aretryer: 
Optional[AsyncRetrying] = None, - **kwargs, - ): - org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID") - tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID") - token = token or os.getenv("UIPATH_ACCESS_TOKEN") - - if not org_id: - raise ValueError( - "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required" - ) - if not tenant_id: - raise ValueError( - "UIPATH_TENANT_ID environment variable or tenant_id parameter is required" - ) - if not token: - raise ValueError( - "UIPATH_ACCESS_TOKEN environment variable or token parameter is required" - ) - - passthrough_client = AwsBedrockCompletionsPassthroughClient( - model=model_name, - token=token, - api_flavor="converse", - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - ) - - client = passthrough_client.get_client() - kwargs["client"] = client - kwargs["model"] = model_name - super().__init__(**kwargs) - self.model = model_name - self.retryer = retryer - self.aretryer = aretryer - - def invoke(self, *args, **kwargs): - retryer = self.retryer or _get_default_retryer() - return retryer(super().invoke, *args, **kwargs) - - async def ainvoke(self, *args, **kwargs): - retryer = self.aretryer or _get_default_async_retryer() - return await retryer(super().ainvoke, *args, **kwargs) - - -class UiPathChatBedrock(ChatBedrock): - llm_provider: LLMProvider = LLMProvider.BEDROCK - api_flavor: APIFlavor = APIFlavor.AWS_BEDROCK_INVOKE - model: str = "" # For tracing serialization - retryer: Optional[Retrying] = None - aretryer: Optional[AsyncRetrying] = None - header_capture: HeaderCapture - - def __init__( - self, - org_id: Optional[str] = None, - tenant_id: Optional[str] = None, - token: Optional[str] = None, - model_name: str = BedrockModels.anthropic_claude_haiku_4_5, - agenthub_config: Optional[str] = None, - byo_connection_id: Optional[str] = None, - retryer: Optional[Retrying] = None, - aretryer: Optional[AsyncRetrying] = None, - **kwargs, - ): - org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID") - tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID") - token = token or os.getenv("UIPATH_ACCESS_TOKEN") - - if not org_id: - raise ValueError( - "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required" - ) - if not tenant_id: - raise ValueError( - "UIPATH_TENANT_ID environment variable or tenant_id parameter is required" - ) - if not token: - raise ValueError( - "UIPATH_ACCESS_TOKEN environment variable or token parameter is required" - ) - - header_capture = HeaderCapture(name=f"bedrock_headers_{id(self)}") - - passthrough_client = AwsBedrockCompletionsPassthroughClient( - model=model_name, - token=token, - api_flavor="invoke", - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - header_capture=header_capture, - ) - - client = passthrough_client.get_client() - kwargs["client"] = client - kwargs["model"] = model_name - kwargs["header_capture"] = header_capture - super().__init__(**kwargs) - self.model = model_name - self.retryer = retryer - self.aretryer = aretryer - - def invoke(self, *args, **kwargs): - retryer = self.retryer or _get_default_retryer() - return retryer(super().invoke, *args, **kwargs) - - async def ainvoke(self, *args, **kwargs): - retryer = self.aretryer or _get_default_async_retryer() - return await retryer(super().ainvoke, *args, **kwargs) - - @staticmethod - def _convert_file_blocks_to_anthropic_documents( - messages: list[BaseMessage], - ) -> list[BaseMessage]: - """Convert FileContentBlock items to Anthropic document format. 
- - langchain_aws's _format_data_content_block() does not support - type='file' blocks (only images). This pre-processes messages to - convert PDF FileContentBlocks into Anthropic's native 'document' - format so they pass through formatting without error. - """ - for message in messages: - if not isinstance(message.content, list): - continue - for i, block in enumerate(message.content): - if ( - isinstance(block, dict) - and block.get("type") == "file" - and block.get("mime_type") == "application/pdf" - and "base64" in block - ): - anthropic_block: dict[str, Any] = { - "type": "document", - "source": { - "type": "base64", - "media_type": block["mime_type"], - "data": block["base64"], - }, - } - message.content[i] = anthropic_block - return messages - - def _generate( - self, - messages: list[BaseMessage], - stop: Optional[list[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - messages = self._convert_file_blocks_to_anthropic_documents(messages) - result = super()._generate( - messages, - stop=stop, - run_manager=run_manager, - **kwargs, - ) - self.header_capture.attach_to_chat_result(result) - self.header_capture.clear() - return result - - def _stream( - self, - messages: list[BaseMessage], - stop: Optional[list[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> Iterator[ChatGenerationChunk]: - messages = self._convert_file_blocks_to_anthropic_documents(messages) - chunks = super()._stream(messages, stop=stop, run_manager=run_manager, **kwargs) - - for chunk in chunks: - self.header_capture.attach_to_chat_generation(chunk) - yield chunk - self.header_capture.clear() - - -def _get_default_retryer() -> BedrockRetryer: - return BedrockRetryer(logger=logger) - - -def _get_default_async_retryer() -> AsyncBedrockRetryer: - return AsyncBedrockRetryer(logger=logger) diff --git a/src/uipath_langchain/chat/chat_model_factory.py b/src/uipath_langchain/chat/chat_model_factory.py deleted file mode 100644 index 40f058dd4..000000000 --- a/src/uipath_langchain/chat/chat_model_factory.py +++ /dev/null @@ -1,276 +0,0 @@ -from typing import Any - -from langchain_core.language_models import BaseChatModel - -from uipath_langchain.chat.types import APIFlavor, LLMProvider - -_DEFAULT_API_FLAVOR: dict[LLMProvider, APIFlavor] = { - LLMProvider.OPENAI: APIFlavor.OPENAI_RESPONSES, - LLMProvider.BEDROCK: APIFlavor.AWS_BEDROCK_CONVERSE, - LLMProvider.VERTEX: APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT, -} - -_API_FLAVOR_TO_PROVIDER: dict[APIFlavor, LLMProvider] = { - APIFlavor.OPENAI_RESPONSES: LLMProvider.OPENAI, - APIFlavor.OPENAI_COMPLETIONS: LLMProvider.OPENAI, - APIFlavor.AWS_BEDROCK_CONVERSE: LLMProvider.BEDROCK, - APIFlavor.AWS_BEDROCK_INVOKE: LLMProvider.BEDROCK, - APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT: LLMProvider.VERTEX, - APIFlavor.VERTEX_ANTHROPIC_CLAUDE: LLMProvider.VERTEX, -} - - -def _fetch_discovery(agenthub_config: str) -> list[dict[str, Any]]: - """Fetch available models from LLM Gateway discovery endpoint.""" - from uipath.platform import UiPath - - sdk = UiPath() - models = sdk.agenthub.get_available_llm_models( - headers={"X-UiPath-AgentHub-Config": agenthub_config} - ) - return [model.model_dump(by_alias=True) for model in models] - - -def _create_openai_llm( - model: str, - api_flavor: APIFlavor, - temperature: float, - max_tokens: int, - agenthub_config: str, - byo_connection_id: str | None = None, - **kwargs: Any, -) -> BaseChatModel: - """Create UiPathChatOpenAI for OpenAI models via 
LLMGateway.""" - from uipath_langchain.chat.openai import UiPathChatOpenAI - - azure_open_ai_latest_api_version = "2025-04-01-preview" - - match api_flavor: - case APIFlavor.OPENAI_RESPONSES: - return UiPathChatOpenAI( - use_responses_api=True, - model_name=model, - temperature=temperature, - max_tokens=max_tokens, - api_version=azure_open_ai_latest_api_version, - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - output_version="v1", - **kwargs, - ) - case APIFlavor.OPENAI_COMPLETIONS: - return UiPathChatOpenAI( - use_responses_api=False, - model_name=model, - temperature=temperature, - max_tokens=max_tokens, - api_version=azure_open_ai_latest_api_version, - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - output_version="v1", - **kwargs, - ) - case _: - raise ValueError(f"Unknown api_flavor={api_flavor} for OpenAI") - - -def _create_bedrock_llm( - model: str, - api_flavor: APIFlavor, - temperature: float, - max_tokens: int, - agenthub_config: str, - byo_connection_id: str | None = None, - **kwargs: Any, -) -> BaseChatModel: - """Create UiPathChatBedrockConverse for Claude models via LLMGateway.""" - from uipath_langchain.chat.bedrock import ( - UiPathChatBedrock, - UiPathChatBedrockConverse, - ) - - match api_flavor: - case APIFlavor.AWS_BEDROCK_CONVERSE: - return UiPathChatBedrockConverse( - model_name=model, - temperature=temperature, - max_tokens=max_tokens, - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - output_version="v1", - **kwargs, - ) - case APIFlavor.AWS_BEDROCK_INVOKE: - return UiPathChatBedrock( - model_name=model, - temperature=temperature, - max_tokens=max_tokens, - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - output_version="v1", - **kwargs, - ) - case _: - raise ValueError(f"Unknown api_flavor={api_flavor} for AwsBedrock") - - -def _create_vertex_llm( - model: str, - api_flavor: APIFlavor, - temperature: float, - max_tokens: int | None, - agenthub_config: str, - byo_connection_id: str | None = None, - **kwargs: Any, -) -> BaseChatModel: - """Create UiPathChatVertex for Gemini models via LLMGateway.""" - from uipath_langchain.chat.vertex import UiPathChatVertex - - match api_flavor: - case APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT: - return UiPathChatVertex( - model_name=model, - temperature=temperature, - max_tokens=max_tokens, - agenthub_config=agenthub_config, - byo_connection_id=byo_connection_id, - output_version="v1", - **kwargs, - ) - case APIFlavor.VERTEX_ANTHROPIC_CLAUDE: - raise ValueError(f"api_flavor={api_flavor} is not yet supported for Vertex") - case _: - raise ValueError(f"Unknown api_flavor={api_flavor} for Vertex") - - -def _resolve_vendor(api_flavor: APIFlavor) -> LLMProvider: - return _API_FLAVOR_TO_PROVIDER[api_flavor] - - -def _resolve_api_flavor(vendor: LLMProvider, model_name: str) -> APIFlavor: - if vendor == LLMProvider.VERTEX and "claude" in model_name: - return APIFlavor.VERTEX_ANTHROPIC_CLAUDE - return _DEFAULT_API_FLAVOR[vendor] - - -def _compute_vendor_and_api_flavor( - model: dict[str, Any], -) -> tuple[LLMProvider, APIFlavor]: - vendor = model.get("vendor") - api_flavor = model.get("apiFlavor") - model_name = model.get("modelName", "") - - if api_flavor is None and vendor is None: - raise ValueError( - f"Neither vendor nor apiFlavor provided for model '{model_name}'. " - "At least one must be present." 
- ) - - if api_flavor is not None and api_flavor not in [p.value for p in APIFlavor]: - raise ValueError( - f"Unknown apiFlavor '{api_flavor}' for model '{model_name}'. " - f"Supported apiFlavors: {[p.value for p in APIFlavor]}" - ) - - if vendor is not None and vendor not in [p.value for p in LLMProvider]: - raise ValueError( - f"Unknown vendor '{vendor}' for model '{model_name}'. " - f"Supported vendors: {[p.value for p in LLMProvider]}" - ) - - resolved_vendor: LLMProvider - resolved_api_flavor: APIFlavor - - if vendor is None and api_flavor is not None: - resolved_api_flavor = APIFlavor(api_flavor) - resolved_vendor = _resolve_vendor(resolved_api_flavor) - elif api_flavor is None and vendor is not None: - resolved_vendor = LLMProvider(vendor) - resolved_api_flavor = _resolve_api_flavor(resolved_vendor, model_name) - else: - assert vendor is not None and api_flavor is not None - resolved_vendor = LLMProvider(vendor) - resolved_api_flavor = APIFlavor(api_flavor) - - return resolved_vendor, resolved_api_flavor - - -def _get_model_info( - model: str, - agenthub_config: str, - byo_connection_id: str | None, -) -> dict[str, Any]: - discovery_models = _fetch_discovery(agenthub_config) - - matching_models = [m for m in discovery_models if m.get("modelName") == model] - - if byo_connection_id: - matching_models = [ - m - for m in matching_models - if (byom_details := m.get("byomDetails")) - and byom_details.get("integrationServiceConnectionId", "").lower() - == byo_connection_id.lower() - ] - - if not byo_connection_id and len(matching_models) > 1: - matching_models = [m for m in matching_models if m.get("byomDetails") is None] - - if not matching_models: - raise ValueError( - f"model='{model}' and byo_connection_id={byo_connection_id}" - + " is not available. It was not returned by the discovery API." - ) - - return matching_models[0] - - -def get_chat_model( - model: str, - temperature: float, - max_tokens: int, - agenthub_config: str, - byo_connection_id: str | None = None, - **kwargs: Any, -) -> BaseChatModel: - """Create and configure LLM instance using LLMGateway API. - - Fetches available models from the discovery API and selects the appropriate - LLM class based on the apiFlavor field from the matching model configuration. - """ - model_info = _get_model_info(model, agenthub_config, byo_connection_id) - - vendor, api_flavor = _compute_vendor_and_api_flavor(model_info) - model_name: str = model_info.get("modelName", model) - - match LLMProvider(vendor): - case LLMProvider.OPENAI: - return _create_openai_llm( - model_name, - api_flavor, - temperature, - max_tokens, - agenthub_config, - byo_connection_id, - **kwargs, - ) - case LLMProvider.BEDROCK: - return _create_bedrock_llm( - model_name, - api_flavor, - temperature, - max_tokens, - agenthub_config, - byo_connection_id, - **kwargs, - ) - case LLMProvider.VERTEX: - return _create_vertex_llm( - model_name, - api_flavor, - temperature, - max_tokens, - agenthub_config, - byo_connection_id, - **kwargs, - ) diff --git a/src/uipath_langchain/chat/handlers/base.py b/src/uipath_langchain/chat/handlers/base.py index 3fc5c09ac..5a8bd5440 100644 --- a/src/uipath_langchain/chat/handlers/base.py +++ b/src/uipath_langchain/chat/handlers/base.py @@ -12,17 +12,6 @@ class ModelPayloadHandler(ABC): Each handler provides provider-specific parameter values for LLM operations. """ - @abstractmethod - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get the tool_choice value that enforces tool usage. 
- - Returns: - Provider-specific value to force tool usage: - - "required" for OpenAI-compatible models - - "any" for Bedrock Converse and Vertex models (string format) - - {"type": "any"} for Bedrock Invoke API (dict format required) - """ - def get_parallel_tool_calls_kwargs( self, parallel_tool_calls: bool ) -> dict[str, Any]: diff --git a/src/uipath_langchain/chat/handlers/bedrock_converse.py b/src/uipath_langchain/chat/handlers/bedrock_converse.py index 83329038b..b0ecbcb64 100644 --- a/src/uipath_langchain/chat/handlers/bedrock_converse.py +++ b/src/uipath_langchain/chat/handlers/bedrock_converse.py @@ -1,7 +1,5 @@ """Bedrock Converse payload handler.""" -from typing import Any - from langchain_core.messages import AIMessage from uipath.runtime.errors import UiPathErrorCategory @@ -41,10 +39,6 @@ class BedrockConversePayloadHandler(ModelPayloadHandler): """Payload handler for AWS Bedrock Converse API.""" - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get tool_choice value for Bedrock Converse API.""" - return "any" - def check_stop_reason(self, response: AIMessage) -> None: """Check Bedrock Converse stopReason and raise exception for faulty terminations. diff --git a/src/uipath_langchain/chat/handlers/bedrock_invoke.py b/src/uipath_langchain/chat/handlers/bedrock_invoke.py index d69514b13..2b85f7c78 100644 --- a/src/uipath_langchain/chat/handlers/bedrock_invoke.py +++ b/src/uipath_langchain/chat/handlers/bedrock_invoke.py @@ -1,7 +1,5 @@ """Bedrock Invoke payload handler.""" -from typing import Any - from langchain_core.messages import AIMessage from uipath.runtime.errors import UiPathErrorCategory @@ -36,10 +34,6 @@ class BedrockInvokePayloadHandler(ModelPayloadHandler): """Payload handler for AWS Bedrock Invoke API.""" - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get tool_choice value for Bedrock Invoke API.""" - return {"type": "any"} - def check_stop_reason(self, response: AIMessage) -> None: """Check Bedrock Invoke stop_reason and raise exception for faulty terminations. diff --git a/src/uipath_langchain/chat/handlers/handler_factory.py b/src/uipath_langchain/chat/handlers/handler_factory.py index 1175daa19..a65150660 100644 --- a/src/uipath_langchain/chat/handlers/handler_factory.py +++ b/src/uipath_langchain/chat/handlers/handler_factory.py @@ -5,7 +5,6 @@ from uipath_langchain.chat.types import ( APIFlavor, LLMProvider, - UiPathPassthroughChatModel, ) from .base import ModelPayloadHandler @@ -30,30 +29,19 @@ } -def get_payload_handler(model: BaseChatModel) -> ModelPayloadHandler: +def get_payload_handler(model: BaseChatModel) -> ModelPayloadHandler | None: """Get the appropriate payload handler for a model. Args: - model: A UiPath chat model instance with llm_provider and api_flavor. + model: A UiPath chat model instance with llm_provider and api_flavor. Returns: - A ModelPayloadHandler instance for the model. - - Raises: - TypeError: If the model doesn't implement UiPathPassthroughChatModel. - ValueError: If no handler is registered for the model's provider/API flavor. + A ModelPayloadHandler instance for the model or None if could not be determined. 
""" - if not isinstance(model, UiPathPassthroughChatModel): - raise TypeError( - f"Model {type(model).__name__} does not implement UiPathPassthroughChatModel" - ) - key = (model.llm_provider, model.api_flavor) - handler_class = _HANDLER_REGISTRY.get(key) - - if handler_class is None: - raise ValueError( - f"No payload handler registered for provider={model.llm_provider}, " - f"api_flavor={model.api_flavor}" - ) + try: + key = (model.api_config.vendor_type, model.api_config.api_flavor) + handler_class = _HANDLER_REGISTRY[key] + except (AttributeError, KeyError) as _: + return None return handler_class() diff --git a/src/uipath_langchain/chat/handlers/openai_completions.py b/src/uipath_langchain/chat/handlers/openai_completions.py index 003c0c294..b7bff3898 100644 --- a/src/uipath_langchain/chat/handlers/openai_completions.py +++ b/src/uipath_langchain/chat/handlers/openai_completions.py @@ -30,10 +30,6 @@ class OpenAICompletionsPayloadHandler(ModelPayloadHandler): """Payload handler for OpenAI Chat Completions API.""" - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get tool_choice value for OpenAI Completions API.""" - return "required" - def get_parallel_tool_calls_kwargs( self, parallel_tool_calls: bool ) -> dict[str, Any]: diff --git a/src/uipath_langchain/chat/handlers/openai_responses.py b/src/uipath_langchain/chat/handlers/openai_responses.py index 935124006..402921edc 100644 --- a/src/uipath_langchain/chat/handlers/openai_responses.py +++ b/src/uipath_langchain/chat/handlers/openai_responses.py @@ -1,7 +1,5 @@ """OpenAI payload handlers.""" -from typing import Any - from langchain_core.messages import AIMessage from uipath.runtime.errors import UiPathErrorCategory @@ -34,15 +32,6 @@ class OpenAIResponsesPayloadHandler(ModelPayloadHandler): """Payload handler for OpenAI Responses API.""" - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get tool_choice value for OpenAI Responses API.""" - return "required" - - def get_parallel_tool_calls_kwargs( - self, parallel_tool_calls: bool - ) -> dict[str, Any]: - return {"parallel_tool_calls": parallel_tool_calls} - def check_stop_reason(self, response: AIMessage) -> None: """Check OpenAI Responses API status and raise exception for faulty terminations. diff --git a/src/uipath_langchain/chat/handlers/vertex_gemini.py b/src/uipath_langchain/chat/handlers/vertex_gemini.py index 8804030d1..370c02124 100644 --- a/src/uipath_langchain/chat/handlers/vertex_gemini.py +++ b/src/uipath_langchain/chat/handlers/vertex_gemini.py @@ -1,7 +1,5 @@ """Vertex Gemini payload handler.""" -from typing import Any - from langchain_core.messages import AIMessage from uipath.runtime.errors import UiPathErrorCategory @@ -114,10 +112,6 @@ class VertexGeminiPayloadHandler(ModelPayloadHandler): """Payload handler for Google Vertex AI Gemini API.""" - def get_required_tool_choice(self) -> str | dict[str, Any]: - """Get tool_choice value for Vertex Gemini API.""" - return "any" - def check_stop_reason(self, response: AIMessage) -> None: """Check Vertex Gemini finishReason and raise exception for faulty terminations. 
diff --git a/src/uipath_langchain/chat/models.py b/src/uipath_langchain/chat/models.py deleted file mode 100644 index 46e7d9977..000000000 --- a/src/uipath_langchain/chat/models.py +++ /dev/null @@ -1,482 +0,0 @@ -import json -import logging -from typing import Any, AsyncIterator, Iterator, Literal, Union - -from langchain_core.callbacks import ( - AsyncCallbackManagerForLLMRun, - CallbackManagerForLLMRun, -) -from langchain_core.language_models import LanguageModelInput -from langchain_core.language_models.chat_models import ( - agenerate_from_stream, - generate_from_stream, -) -from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage -from langchain_core.messages.ai import UsageMetadata -from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult -from langchain_core.runnables import Runnable -from langchain_openai.chat_models import AzureChatOpenAI -from pydantic import BaseModel -from uipath.utils import EndpointManager - -from uipath_langchain._utils._request_mixin import UiPathRequestMixin - -logger = logging.getLogger(__name__) - - -class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI): - """Custom LLM connector for LangChain integration with UiPath.""" - - def _generate( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: CallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> ChatResult: - if "tools" in kwargs and not kwargs["tools"]: - del kwargs["tools"] - - if self.streaming: - stream_iter = self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return generate_from_stream(stream_iter) - - payload = self._get_request_payload(messages, stop=stop, **kwargs) - response = self._call(self.url, payload, self.auth_headers) - return self._create_chat_result(response) - - async def _agenerate( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: AsyncCallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> ChatResult: - if "tools" in kwargs and not kwargs["tools"]: - del kwargs["tools"] - - if self.streaming: - stream_iter = self._astream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return await agenerate_from_stream(stream_iter) - - payload = self._get_request_payload(messages, stop=stop, **kwargs) - response = await self._acall(self.url, payload, self.auth_headers) - return self._create_chat_result(response) - - def _stream( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: CallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> Iterator[ChatGenerationChunk]: - if "tools" in kwargs and not kwargs["tools"]: - del kwargs["tools"] - kwargs["stream"] = True - payload = self._get_request_payload(messages, stop=stop, **kwargs) - - default_chunk_class = AIMessageChunk - - for chunk in self._stream_request(self.url, payload, self.auth_headers): - if self.logger: - self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}") - generation_chunk = self._convert_chunk( - chunk, default_chunk_class, include_tool_calls=True - ) - if generation_chunk is None: - if self.logger: - self.logger.debug("[Stream] Skipping None generation_chunk") - continue - - if self.logger: - self.logger.debug( - f"[Stream] Yielding generation_chunk: {generation_chunk}" - ) - - if run_manager: - run_manager.on_llm_new_token( - generation_chunk.text, - chunk=generation_chunk, - ) - - yield generation_chunk - - async def _astream( - self, - messages: list[BaseMessage], - stop: list[str] | None = 
None, - run_manager: AsyncCallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> AsyncIterator[ChatGenerationChunk]: - if "tools" in kwargs and not kwargs["tools"]: - del kwargs["tools"] - kwargs["stream"] = True - payload = self._get_request_payload(messages, stop=stop, **kwargs) - - default_chunk_class = AIMessageChunk - - async for chunk in self._astream_request(self.url, payload, self.auth_headers): - generation_chunk = self._convert_chunk( - chunk, default_chunk_class, include_tool_calls=True - ) - if generation_chunk is None: - continue - - if run_manager: - await run_manager.on_llm_new_token( - generation_chunk.text, - chunk=generation_chunk, - ) - - yield generation_chunk - - def with_structured_output( - self, - schema: Any = None, - *, - method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema", - include_raw: bool = False, - strict: bool | None = None, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, Any]: - """Model wrapper that returns outputs formatted to match the given schema.""" - schema = ( - schema.model_json_schema() - if isinstance(schema, type) and issubclass(schema, BaseModel) - else schema - ) - return super().with_structured_output( - schema=schema, - method=method, - include_raw=include_raw, - strict=strict, - **kwargs, - ) - - @property - def endpoint(self) -> str: - endpoint = EndpointManager.get_passthrough_endpoint() - logger.debug("Using endpoint: %s", endpoint) - return endpoint.format( - model=self.model_name, api_version=self.openai_api_version - ) - - -class UiPathChat(UiPathRequestMixin, AzureChatOpenAI): - """Custom LLM connector for LangChain integration with UiPath Normalized.""" - - def _create_chat_result( - self, - response: Union[dict[str, Any], BaseModel], - generation_info: dict[Any, Any] | None = None, - ) -> ChatResult: - if not isinstance(response, dict): - response = response.model_dump() - message = response["choices"][0]["message"] - usage = response["usage"] - - ai_message = AIMessage( - content=message.get("content", ""), - usage_metadata=UsageMetadata( - input_tokens=usage.get("prompt_tokens", 0), - output_tokens=usage.get("completion_tokens", 0), - total_tokens=usage.get("total_tokens", 0), - ), - additional_kwargs={}, - response_metadata={ - "token_usage": response["usage"], - "model_name": self.model_name, - "finish_reason": response["choices"][0].get("finish_reason", None), - "system_fingerprint": response["id"], - "created": response["created"], - }, - ) - - if "tool_calls" in message: - ai_message.tool_calls = [ - { - "id": tool["id"], - "name": tool["name"], - "args": tool["arguments"], - "type": "tool_call", - } - for tool in message["tool_calls"] - ] - generation = ChatGeneration(message=ai_message) - return ChatResult(generations=[generation]) - - def _get_request_payload( - self, - input_: LanguageModelInput, - *, - stop: list[str] | None = None, - **kwargs: Any, - ) -> dict[Any, Any]: - payload = super()._get_request_payload(input_, stop=stop, **kwargs) - # hacks to make the request work with uipath normalized - for message in payload["messages"]: - if message["content"] is None: - message["content"] = "" - if "tool_calls" in message: - for tool_call in message["tool_calls"]: - tool_call["name"] = tool_call["function"]["name"] - tool_call["arguments"] = json.loads( - tool_call["function"]["arguments"] - ) - if message["role"] == "tool": - message["content"] = { - "result": message["content"], - "call_id": message["tool_call_id"], - } - return payload - - def _normalize_tool_choice(self, 
kwargs: dict[str, Any]) -> None: - """Normalize tool_choice for UiPath Gateway compatibility. - - Converts LangChain tool_choice formats to UiPath Gateway format: - - String "required" -> {"type": "required"} - - String "auto" -> {"type": "auto"} - - Dict with function -> {"type": "tool", "name": "function_name"} - """ - if "tool_choice" in kwargs: - tool_choice = kwargs["tool_choice"] - - if isinstance(tool_choice, str): - if tool_choice in ("required", "auto", "none"): - logger.debug( - f"Converting tool_choice from '{tool_choice}' to {{'type': '{tool_choice}'}}" - ) - kwargs["tool_choice"] = {"type": tool_choice} - elif ( - isinstance(tool_choice, dict) and tool_choice.get("type") == "function" - ): - function_name = tool_choice["function"]["name"] - logger.debug( - f"Converting tool_choice from function '{function_name}' to tool format" - ) - kwargs["tool_choice"] = { - "type": "tool", - "name": function_name, - } - - def _generate( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: CallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> ChatResult: - """Override the _generate method to implement the chat model logic. - - This can be a call to an API, a call to a local model, or any other - implementation that generates a response to the input prompt. - - Args: - messages: the prompt composed of a list of messages. - stop: a list of strings on which the model should stop generating. - If generation stops due to a stop token, the stop token itself - SHOULD BE INCLUDED as part of the output. This is not enforced - across models right now, but it's a good practice to follow since - it makes it much easier to parse the output of the model - downstream and understand why generation stopped. - run_manager: A run manager with callbacks for the LLM. - """ - if kwargs.get("tools"): - kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]] - self._normalize_tool_choice(kwargs) - - if self.streaming: - stream_iter = self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return generate_from_stream(stream_iter) - - payload = self._get_request_payload(messages, stop=stop, **kwargs) - response = self._call(self.url, payload, self.auth_headers) - return self._create_chat_result(response) - - async def _agenerate( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: AsyncCallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> ChatResult: - """Override the _generate method to implement the chat model logic. - - This can be a call to an API, a call to a local model, or any other - implementation that generates a response to the input prompt. - - Args: - messages: the prompt composed of a list of messages. - stop: a list of strings on which the model should stop generating. - If generation stops due to a stop token, the stop token itself - SHOULD BE INCLUDED as part of the output. This is not enforced - across models right now, but it's a good practice to follow since - it makes it much easier to parse the output of the model - downstream and understand why generation stopped. - run_manager: A run manager with callbacks for the LLM. 
- """ - if kwargs.get("tools"): - kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]] - self._normalize_tool_choice(kwargs) - - if self.streaming: - stream_iter = self._astream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return await agenerate_from_stream(stream_iter) - - payload = self._get_request_payload(messages, stop=stop, **kwargs) - response = await self._acall(self.url, payload, self.auth_headers) - return self._create_chat_result(response) - - def _stream( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: CallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> Iterator[ChatGenerationChunk]: - """Stream the LLM on a given prompt. - - Args: - messages: the prompt composed of a list of messages. - stop: a list of strings on which the model should stop generating. - run_manager: A run manager with callbacks for the LLM. - **kwargs: Additional keyword arguments. - - Returns: - An iterator of ChatGenerationChunk objects. - """ - if kwargs.get("tools"): - kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]] - self._normalize_tool_choice(kwargs) - kwargs["stream"] = True - payload = self._get_request_payload(messages, stop=stop, **kwargs) - - default_chunk_class = AIMessageChunk - - for chunk in self._stream_request(self.url, payload, self.auth_headers): - if self.logger: - self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}") - generation_chunk = self._convert_chunk( - chunk, default_chunk_class, include_tool_calls=True - ) - if generation_chunk is None: - if self.logger: - self.logger.debug("[Stream] Skipping None generation_chunk") - continue - - if self.logger: - self.logger.debug( - f"[Stream] Yielding generation_chunk: {generation_chunk}" - ) - - if run_manager: - run_manager.on_llm_new_token( - generation_chunk.text, - chunk=generation_chunk, - ) - - yield generation_chunk - - async def _astream( - self, - messages: list[BaseMessage], - stop: list[str] | None = None, - run_manager: AsyncCallbackManagerForLLMRun | None = None, - **kwargs: Any, - ) -> AsyncIterator[ChatGenerationChunk]: - """Async stream the LLM on a given prompt. - - Args: - messages: the prompt composed of a list of messages. - stop: a list of strings on which the model should stop generating. - run_manager: A run manager with callbacks for the LLM. - **kwargs: Additional keyword arguments. - - Returns: - An async iterator of ChatGenerationChunk objects. 
- """ - if kwargs.get("tools"): - kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]] - self._normalize_tool_choice(kwargs) - kwargs["stream"] = True - payload = self._get_request_payload(messages, stop=stop, **kwargs) - - # Update headers to enable streaming - headers = {**self.auth_headers} - headers["X-UiPath-Streaming-Enabled"] = "true" - - default_chunk_class = AIMessageChunk - - async for chunk in self._astream_request(self.url, payload, headers): - generation_chunk = self._convert_chunk( - chunk, default_chunk_class, include_tool_calls=True - ) - if generation_chunk is None: - continue - - if run_manager: - await run_manager.on_llm_new_token( - generation_chunk.text, - chunk=generation_chunk, - ) - - yield generation_chunk - - def with_structured_output( - self, - schema: Any = None, - *, - method: Literal[ - "function_calling", "json_mode", "json_schema" - ] = "function_calling", - include_raw: bool = False, - strict: bool | None = None, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, Any]: - """Model wrapper that returns outputs formatted to match the given schema.""" - if method == "json_schema" and ( - not self.model_name or not self.model_name.startswith("gpt") - ): - method = "function_calling" - if self.logger: - self.logger.warning( - "The json_schema output is not supported for non-GPT models. Using function_calling instead.", - extra={ - "ActionName": self.settings.action_name, - "ActionId": self.settings.action_id, - } - if self.settings - else None, - ) - schema = ( - schema.model_json_schema() - if isinstance(schema, type) and issubclass(schema, BaseModel) - else schema - ) - return super().with_structured_output( - schema=schema, - method=method, - include_raw=include_raw, - strict=strict, - **kwargs, - ) - - @property - def endpoint(self) -> str: - endpoint = EndpointManager.get_normalized_endpoint() - logger.debug("Using endpoint: %s", endpoint) - return endpoint.format( - model=self.model_name, api_version=self.openai_api_version - ) - - @property - def is_normalized(self) -> bool: - return True diff --git a/src/uipath_langchain/chat/openai.py b/src/uipath_langchain/chat/openai.py deleted file mode 100644 index 9a4eb365a..000000000 --- a/src/uipath_langchain/chat/openai.py +++ /dev/null @@ -1,187 +0,0 @@ -import logging -import os -from typing import Optional - -import httpx -from langchain_openai import AzureChatOpenAI -from pydantic import PrivateAttr -from uipath._utils import resource_override -from uipath._utils._ssl_context import get_httpx_client_kwargs -from uipath.utils import EndpointManager - -from .supported_models import OpenAIModels -from .types import APIFlavor, LLMProvider - -logger = logging.getLogger(__name__) - - -def _rewrite_openai_url( - original_url: str, params: httpx.QueryParams -) -> httpx.URL | None: - """Rewrite OpenAI URLs to UiPath gateway completions endpoint. - - Handles three URL patterns: - - responses: false -> .../openai/deployments/.../chat/completions?api-version=... - - responses: true -> .../openai/responses?api-version=... - - responses API base -> .../{model}?api-version=... 
(no /openai/ path) - - All are rewritten to .../completions - """ - if "/openai/deployments/" in original_url: - base_url = original_url.split("/openai/deployments/")[0] - elif "/openai/responses" in original_url: - base_url = original_url.split("/openai/responses")[0] - else: - # Handle base URL case (no /openai/ path appended yet) - # Strip query string to get base URL - base_url = original_url.split("?")[0] - - new_url_str = f"{base_url}/completions" - if params: - return httpx.URL(new_url_str, params=params) - return httpx.URL(new_url_str) - - -class UiPathURLRewriteTransport(httpx.AsyncHTTPTransport): - def __init__(self, verify: bool = True, **kwargs): - super().__init__(verify=verify, **kwargs) - - async def handle_async_request(self, request: httpx.Request) -> httpx.Response: - new_url = _rewrite_openai_url(str(request.url), request.url.params) - if new_url: - request.url = new_url - - return await super().handle_async_request(request) - - -class UiPathSyncURLRewriteTransport(httpx.HTTPTransport): - def __init__(self, verify: bool = True, **kwargs): - super().__init__(verify=verify, **kwargs) - - def handle_request(self, request: httpx.Request) -> httpx.Response: - new_url = _rewrite_openai_url(str(request.url), request.url.params) - if new_url: - request.url = new_url - - return super().handle_request(request) - - -class UiPathChatOpenAI(AzureChatOpenAI): - llm_provider: LLMProvider = LLMProvider.OPENAI - _api_flavor: APIFlavor = PrivateAttr() - - @property - def api_flavor(self) -> APIFlavor: - return self._api_flavor - - @resource_override( - resource_identifier="byo_connection_id", resource_type="connection" - ) - def __init__( - self, - use_responses_api: bool, - token: Optional[str] = None, - model_name: str = OpenAIModels.gpt_4_1_mini_2025_04_14, - api_version: str = "2024-12-01-preview", - org_id: Optional[str] = None, - tenant_id: Optional[str] = None, - agenthub_config: Optional[str] = None, - extra_headers: Optional[dict[str, str]] = None, - byo_connection_id: Optional[str] = None, - **kwargs, - ): - org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID") - tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID") - token = token or os.getenv("UIPATH_ACCESS_TOKEN") - - if not org_id: - raise ValueError( - "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required" - ) - if not tenant_id: - raise ValueError( - "UIPATH_TENANT_ID environment variable or tenant_id parameter is required" - ) - if not token: - raise ValueError( - "UIPATH_ACCESS_TOKEN environment variable or token parameter is required" - ) - - self._openai_api_version = api_version - self._vendor = "openai" - self._model_name = model_name - self._url: Optional[str] = None - self._agenthub_config = agenthub_config - self._byo_connection_id = byo_connection_id - self._extra_headers = extra_headers or {} - - client_kwargs = get_httpx_client_kwargs() - verify = client_kwargs.get("verify", True) - - api_flavor = ( - APIFlavor.OPENAI_RESPONSES - if use_responses_api - else APIFlavor.OPENAI_COMPLETIONS - ) - - super().__init__( - azure_endpoint=self._build_base_url(), - model_name=model_name, - default_headers=self._build_headers(token), - http_async_client=httpx.AsyncClient( - transport=UiPathURLRewriteTransport(verify=verify), - **client_kwargs, - ), - http_client=httpx.Client( - transport=UiPathSyncURLRewriteTransport(verify=verify), - **client_kwargs, - ), - api_key=token, - api_version=api_version, - validate_base_url=False, - use_responses_api=use_responses_api, - include_response_headers=True, - 
**kwargs, - ) - - self._api_flavor = api_flavor - - def _build_headers(self, token: str) -> dict[str, str]: - headers = { - "X-UiPath-LlmGateway-ApiFlavor": "auto", - "Authorization": f"Bearer {token}", - } - - if self._agenthub_config: - headers["X-UiPath-AgentHub-Config"] = self._agenthub_config - if self._byo_connection_id: - headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = self._byo_connection_id - if job_key := os.getenv("UIPATH_JOB_KEY"): - headers["X-UiPath-JobKey"] = job_key - if process_key := os.getenv("UIPATH_PROCESS_KEY"): - headers["X-UiPath-ProcessKey"] = process_key - - # Allow extra_headers to override defaults - headers.update(self._extra_headers) - return headers - - @property - def endpoint(self) -> str: - vendor_endpoint = EndpointManager.get_vendor_endpoint() - formatted_endpoint = vendor_endpoint.format( - vendor=self._vendor, - model=self._model_name, - ) - base_endpoint = formatted_endpoint.replace("/completions", "") - return f"{base_endpoint}?api-version={self._openai_api_version}" - - def _build_base_url(self) -> str: - if not self._url: - env_uipath_url = os.getenv("UIPATH_URL") - - if env_uipath_url: - self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}" - else: - raise ValueError("UIPATH_URL environment variable is required") - - return self._url diff --git a/src/uipath_langchain/chat/retryers/base.py b/src/uipath_langchain/chat/retryers/base.py deleted file mode 100644 index f69ff951e..000000000 --- a/src/uipath_langchain/chat/retryers/base.py +++ /dev/null @@ -1,214 +0,0 @@ -"""Base retry strategy with common functionality for all providers. - -Provides abstract base classes for provider-specific retry implementations. -Each provider should subclass and implement the abstract methods for -extracting retry information from their specific exception types. 
-""" - -import logging -import random -from abc import ABC, abstractmethod -from typing import Callable, Mapping - -from tenacity import ( - AsyncRetrying, - RetryCallState, - Retrying, - stop_after_attempt, -) - -RETRYABLE_STATUS_CODES = {408, 429, 502, 503, 504} - - -class RetryProvider(ABC): - """Interface for provider-specific retry logic.""" - - @abstractmethod - def extract_headers_from_exception( - self, exception: BaseException - ) -> Mapping[str, str] | None: - """Extract headers from provider-specific exception response structure.""" - pass - - @abstractmethod - def extract_status_code(self, exception: BaseException) -> int | None: - """Extract HTTP status code from provider-specific exception.""" - pass - - @abstractmethod - def get_retry_exceptions(self) -> tuple[type[Exception], ...]: - """Get the tuple of exception types that should always be retried.""" - pass - - -def _create_retry_condition( - retry_provider: RetryProvider, -) -> Callable[[RetryCallState], bool]: - """Create retry condition for tenacity.""" - - def _is_retryable_exception(exception: BaseException) -> bool: - """Determine if an exception should be retried.""" - retry_on_exceptions = retry_provider.get_retry_exceptions() - current: BaseException | None = exception - - while current is not None: - if isinstance(current, retry_on_exceptions): - return True - - status_code = retry_provider.extract_status_code(current) - if status_code is not None and status_code in RETRYABLE_STATUS_CODES: - return True - - # Check for Retry-After header (implies server wants retry) - headers = retry_provider.extract_headers_from_exception(current) - if headers: - retry_after = headers.get("retry-after") or headers.get("Retry-After") - if retry_after: - return True - - current = current.__cause__ - - return False - - def retry_condition(retry_state: RetryCallState) -> bool: - if retry_state.outcome is None: - return False - exception = retry_state.outcome.exception() - if exception is None: - return False - return _is_retryable_exception(exception) - - return retry_condition - - -def _create_wait_strategy( - retry_provider: RetryProvider, - initial: float = 5.0, - max_delay: float = 180.0, - logger: logging.Logger | None = None, -) -> Callable[[RetryCallState], float]: - """Create wait strategy honoring Retry-After header with exponential backoff fallback.""" - - def _parse_retry_after(header_value: str) -> float | None: - """Parse Retry-After header value (durations only, not datetimes).""" - try: - seconds = float(header_value.strip()) - if seconds < 0: - return None - return seconds - except (ValueError, AttributeError): - return None - - def _extract_retry_after_header(exception: BaseException) -> float | None: - """Extract and parse Retry-After header from exception chain.""" - current: BaseException | None = exception - while current: - headers = retry_provider.extract_headers_from_exception(current) - if headers: - retry_after = headers.get("retry-after") or headers.get("Retry-After") - if retry_after: - parsed = _parse_retry_after(retry_after) - if parsed is not None: - return parsed - current = current.__cause__ - return None - - def _exponential_backoff(attempt: int, initial: float) -> float: - """Calculate exponential backoff with jitter.""" - exponent = attempt - 1 - exponential = initial * (2**exponent) - jitter = random.uniform(0, 1.0) - return exponential + jitter - - def wait_strategy(retry_state: RetryCallState) -> float: - """Calculate wait time based on exception and retry state.""" - if retry_state.outcome is 
None: - return initial - - exception = retry_state.outcome.exception() - if exception is not None: - retry_after = _extract_retry_after_header(exception) - if retry_after is not None: - capped_wait = min(retry_after, max_delay) - if logger: - logger.info( - f"Retrying after {retry_after:.1f}s" - f"{f' (capped to {capped_wait:.1f}s)' if capped_wait != retry_after else ''}" - ) - return capped_wait - - exponential_wait = _exponential_backoff(retry_state.attempt_number, initial) - capped_wait = min(exponential_wait, max_delay) - if logger: - logger.info( - f"Retrying with exponential backoff after {capped_wait:.1f}s (attempt #{retry_state.attempt_number})" - ) - return capped_wait - - return wait_strategy - - -class BaseSyncRetryer(Retrying, ABC): - """Synchronous retry strategy base class. - - Args: - max_retries: Maximum number of retry attempts - initial: Initial delay for exponential backoff in seconds - max_delay: Maximum delay between retries in seconds - logger: Optional logger for retry events - """ - - @abstractmethod - def get_retry_provider(self) -> RetryProvider: - """Return the provider instance for this retryer.""" - pass - - def __init__( - self, - max_retries: int = 5, - initial: float = 5.0, - max_delay: float = 120.0, - logger: logging.Logger | None = None, - ): - retry_provider = self.get_retry_provider() - - Retrying.__init__( - self, - wait=_create_wait_strategy(retry_provider, initial, max_delay, logger), - retry=_create_retry_condition(retry_provider), - stop=stop_after_attempt(max_retries), - reraise=True, - ) - - -class BaseAsyncRetryer(AsyncRetrying, ABC): - """Asynchronous retry strategy base class. - - Args: - max_retries: Maximum number of retry attempts - initial: Initial delay for exponential backoff in seconds - max_delay: Maximum delay between retries in seconds - logger: Optional logger for retry events - """ - - @abstractmethod - def get_retry_provider(self) -> RetryProvider: - """Return the provider instance for this retryer.""" - pass - - def __init__( - self, - max_retries: int = 5, - initial: float = 5.0, - max_delay: float = 120.0, - logger: logging.Logger | None = None, - ): - retry_provider = self.get_retry_provider() - - AsyncRetrying.__init__( - self, - wait=_create_wait_strategy(retry_provider, initial, max_delay, logger), - retry=_create_retry_condition(retry_provider), - stop=stop_after_attempt(max_retries), - reraise=True, - ) diff --git a/src/uipath_langchain/chat/retryers/bedrock.py b/src/uipath_langchain/chat/retryers/bedrock.py deleted file mode 100644 index f7333a6af..000000000 --- a/src/uipath_langchain/chat/retryers/bedrock.py +++ /dev/null @@ -1,74 +0,0 @@ -"""AWS Bedrock specific retry strategy implementation.""" - -from typing import Mapping - -import botocore.exceptions - -from .base import BaseAsyncRetryer, BaseSyncRetryer, RetryProvider - -# Bedrock-specific exceptions that should always be retried -_BEDROCK_RETRY_EXCEPTIONS = ( - botocore.exceptions.ReadTimeoutError, - botocore.exceptions.ConnectTimeoutError, - botocore.exceptions.EndpointConnectionError, -) - - -class BedrockRetryProvider(RetryProvider): - """Provider for Bedrock specific exception handling.""" - - def extract_headers_from_exception( - self, exception: BaseException - ) -> Mapping[str, str] | None: - """Extract headers from botocore response structure.""" - if isinstance(exception, botocore.exceptions.ClientError): - response = exception.response - if "ResponseMetadata" in response: - headers = response["ResponseMetadata"].get("HTTPHeaders", {}) - if headers: - 
-        return None
-
-    def extract_status_code(self, exception: BaseException) -> int | None:
-        """Extract HTTP status code from botocore response."""
-        if isinstance(exception, botocore.exceptions.ClientError):
-            response = exception.response
-            if "ResponseMetadata" in response:
-                return response["ResponseMetadata"].get("HTTPStatusCode")
-        return None
-
-    def get_retry_exceptions(self) -> tuple[type[Exception], ...]:
-        """Get Bedrock-specific exceptions that should always be retried."""
-        return _BEDROCK_RETRY_EXCEPTIONS
-
-
-class BedrockRetryer(BaseSyncRetryer):
-    """Synchronous retry strategy for AWS Bedrock with botocore exception handling.
-
-    Handles botocore-based exceptions and response structures from the boto3 SDK.
-
-    Args:
-        max_retries: Maximum number of retry attempts
-        initial: Initial delay for exponential backoff in seconds
-        max_delay: Maximum delay between retries in seconds
-        logger: Optional logger for retry events
-    """
-
-    def get_retry_provider(self) -> RetryProvider:
-        return BedrockRetryProvider()
-
-
-class AsyncBedrockRetryer(BaseAsyncRetryer):
-    """Asynchronous retry strategy for AWS Bedrock with botocore exception handling.
-
-    Handles botocore-based exceptions and response structures from the boto3 SDK.
-
-    Args:
-        max_retries: Maximum number of retry attempts
-        initial: Initial delay for exponential backoff in seconds
-        max_delay: Maximum delay between retries in seconds
-        logger: Optional logger for retry events
-    """
-
-    def get_retry_provider(self) -> RetryProvider:
-        return BedrockRetryProvider()
diff --git a/src/uipath_langchain/chat/retryers/vertex.py b/src/uipath_langchain/chat/retryers/vertex.py
deleted file mode 100644
index 7867c059f..000000000
--- a/src/uipath_langchain/chat/retryers/vertex.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Vertex AI (Google GenAI) specific retry strategy implementation."""
-
-from typing import Mapping
-
-import httpx
-from google.genai.errors import APIError
-
-from .base import BaseAsyncRetryer, BaseSyncRetryer, RetryProvider
-
-# Vertex-specific exceptions that should always be retried
-_VERTEX_RETRY_EXCEPTIONS = (
-    httpx.TimeoutException,
-    httpx.ConnectError,
-    httpx.RemoteProtocolError,
-)
-
-
-class VertexRetryProvider(RetryProvider):
-    """Provider for Vertex AI specific exception handling."""
-
-    def extract_headers_from_exception(
-        self, exception: BaseException
-    ) -> Mapping[str, str] | None:
-        """Extract headers from google.genai APIError response structure."""
-        if isinstance(exception, APIError):
-            if hasattr(exception.response, "headers"):
-                return exception.response.headers
-        return None
-
-    def extract_status_code(self, exception: BaseException) -> int | None:
-        """Extract HTTP status code from google.genai APIError."""
-        if isinstance(exception, APIError):
-            return exception.code
-        return None
-
-    def get_retry_exceptions(self) -> tuple[type[Exception], ...]:
-        """Get Vertex-specific exceptions that should always be retried."""
-        return _VERTEX_RETRY_EXCEPTIONS
-
-
-class VertexRetryer(BaseSyncRetryer):
-    """Synchronous retry strategy for Vertex AI with httpx exception handling.
-
-    Handles httpx-based exceptions and response structures from the Google GenAI SDK.
-
-    Args:
-        max_retries: Maximum number of retry attempts
-        initial: Initial delay for exponential backoff in seconds
-        max_delay: Maximum delay between retries in seconds
-        logger: Optional logger for retry events
-    """
-
-    def get_retry_provider(self) -> RetryProvider:
-        return VertexRetryProvider()
-
-
-class AsyncVertexRetryer(BaseAsyncRetryer):
-    """Asynchronous retry strategy for Vertex AI with httpx exception handling.
-
-    Handles httpx-based exceptions and response structures from the Google GenAI SDK.
-
-    Args:
-        max_retries: Maximum number of retry attempts
-        initial: Initial delay for exponential backoff in seconds
-        max_delay: Maximum delay between retries in seconds
-        logger: Optional logger for retry events
-    """
-
-    def get_retry_provider(self) -> RetryProvider:
-        return VertexRetryProvider()
diff --git a/src/uipath_langchain/chat/supported_models.py b/src/uipath_langchain/chat/supported_models.py
deleted file mode 100644
index 5634e445a..000000000
--- a/src/uipath_langchain/chat/supported_models.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from enum import StrEnum
-
-
-class OpenAIModels(StrEnum):
-    """Supported OpenAI model identifiers."""
-
-    # GPT-4o models
-    gpt_4o_2024_05_13 = "gpt-4o-2024-05-13"
-    gpt_4o_2024_08_06 = "gpt-4o-2024-08-06"
-    gpt_4o_2024_11_20 = "gpt-4o-2024-11-20"
-    gpt_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
-
-    # GPT-4.1 models
-    gpt_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
-    gpt_4_1_mini_2025_04_14 = "gpt-4.1-mini-2025-04-14"
-    gpt_4_1_nano_2025_04_14 = "gpt-4.1-nano-2025-04-14"
-
-    # GPT-5 models
-    gpt_5_2025_08_07 = "gpt-5-2025-08-07"
-    gpt_5_chat_2025_08_07 = "gpt-5-chat-2025-08-07"
-    gpt_5_mini_2025_08_07 = "gpt-5-mini-2025-08-07"
-    gpt_5_nano_2025_08_07 = "gpt-5-nano-2025-08-07"
-
-    # GPT-5.1 models
-    gpt_5_1_2025_11_13 = "gpt-5.1-2025-11-13"
-
-    # GPT-5.2 models
-    gpt_5_2_2025_12_11 = "gpt-5.2-2025-12-11"
-
-
-class GeminiModels(StrEnum):
-    """Supported Google Gemini model identifiers."""
-
-    # Gemini 2 models
-    gemini_2_5_pro = "gemini-2.5-pro"
-    gemini_2_5_flash = "gemini-2.5-flash"
-    gemini_2_0_flash_001 = "gemini-2.0-flash-001"
-
-    # Gemini 3 models
-    gemini_3_pro_preview = "gemini-3-pro-preview"
-
-
-class BedrockModels(StrEnum):
-    """Supported AWS Bedrock model identifiers."""
-
-    # Claude 3.7 models
-    anthropic_claude_3_7_sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0"
-
-    # Claude 4 models
-    anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0"
-
-    # Claude 4.5 models
-    anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0"
-    anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0"
diff --git a/src/uipath_langchain/chat/types.py b/src/uipath_langchain/chat/types.py
index af7c68618..35d736b44 100644
--- a/src/uipath_langchain/chat/types.py
+++ b/src/uipath_langchain/chat/types.py
@@ -1,41 +1,20 @@
 from enum import StrEnum
-from typing import Protocol, runtime_checkable


 class LLMProvider(StrEnum):
     """LLM provider/vendor identifier."""

-    OPENAI = "OpenAi"
-    BEDROCK = "AwsBedrock"
-    VERTEX = "VertexAi"
+    OPENAI = "openai"
+    BEDROCK = "awsbedrock"
+    VERTEX = "vertexai"


 class APIFlavor(StrEnum):
     """API flavor for LLM communication."""

-    OPENAI_RESPONSES = "OpenAIResponses"
-    OPENAI_COMPLETIONS = "OpenAiChatCompletions"
-    AWS_BEDROCK_CONVERSE = "AwsBedrockConverse"
-    AWS_BEDROCK_INVOKE = "AwsBedrockInvoke"
-    VERTEX_GEMINI_GENERATE_CONTENT = "GeminiGenerateContent"
-    VERTEX_ANTHROPIC_CLAUDE = "AnthropicClaude"
-
-
-@runtime_checkable
-class UiPathPassthroughChatModel(Protocol):
-    """Protocol for UiPath chat models with provider and flavor information.
-
-    All UiPath chat model classes (UiPathChatOpenAI, UiPathChatBedrock,
-    UiPathChatBedrockConverse, UiPathChatVertex, UiPathChat, UiPathAzureChatOpenAI)
-    implement this protocol.
-    """
-
-    @property
-    def llm_provider(self) -> LLMProvider:
-        """The LLM provider for this model."""
-        ...
-
-    @property
-    def api_flavor(self) -> APIFlavor:
-        """The API flavor for this model."""
-        ...
+    OPENAI_RESPONSES = "responses"
+    OPENAI_COMPLETIONS = "chat-completions"
+    AWS_BEDROCK_CONVERSE = "converse"
+    AWS_BEDROCK_INVOKE = "invoke"
+    VERTEX_GEMINI_GENERATE_CONTENT = "generate-content"
+    VERTEX_ANTHROPIC_CLAUDE = "anthropic-claude"
diff --git a/src/uipath_langchain/chat/vertex.py b/src/uipath_langchain/chat/vertex.py
deleted file mode 100644
index ce568ca3d..000000000
--- a/src/uipath_langchain/chat/vertex.py
+++ /dev/null
@@ -1,389 +0,0 @@
-import logging
-import os
-from collections.abc import AsyncIterator, Iterator
-from typing import Any, Optional
-
-import httpx
-from langchain_core.callbacks import (
-    AsyncCallbackManagerForLLMRun,
-    CallbackManagerForLLMRun,
-)
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import ChatGenerationChunk, ChatResult
-from tenacity import AsyncRetrying, Retrying
-from uipath._utils import resource_override
-from uipath._utils._ssl_context import get_httpx_client_kwargs
-from uipath.utils import EndpointManager
-
-from .header_capture import HeaderCapture
-from .retryers.vertex import AsyncVertexRetryer, VertexRetryer
-from .supported_models import GeminiModels
-from .types import APIFlavor, LLMProvider
-
-logger = logging.getLogger(__name__)
-
-
-def _check_genai_dependencies() -> None:
-    """Check if required dependencies for UiPathChatVertex are installed."""
-    import importlib.util
-
-    missing_packages = []
-
-    if importlib.util.find_spec("langchain_google_genai") is None:
-        missing_packages.append("langchain-google-genai")
-
-    if importlib.util.find_spec("google.genai") is None:
-        missing_packages.append("google-genai")
-
-    if missing_packages:
-        packages_str = ", ".join(missing_packages)
-        raise ImportError(
-            f"The following packages are required to use UiPathChatVertex: {packages_str}\n"
-            "Please install them using one of the following methods:\n\n"
-            "  # Using pip:\n"
-            f"  pip install uipath-langchain[vertex]\n\n"
-            "  # Using uv:\n"
-            f"  uv add 'uipath-langchain[vertex]'\n\n"
-        )
-
-
-_check_genai_dependencies()
-
-import google.genai
-from google.genai import types as genai_types
-from langchain_google_genai import ChatGoogleGenerativeAI
-from pydantic import PrivateAttr
-
-
-def _rewrite_vertex_url(original_url: str, gateway_url: str) -> httpx.URL | None:
-    """Rewrite Google GenAI URLs to UiPath gateway endpoint.
-
-    Handles URL patterns containing generateContent or streamGenerateContent.
-    Returns the gateway URL, or None if no rewrite needed.
-    """
-    if "generateContent" in original_url or "streamGenerateContent" in original_url:
-        url = httpx.URL(gateway_url)
-        if "alt=sse" in original_url:
-            url = url.copy_with(params={"alt": "sse"})
-        return url
-    return None
-
-
-class _UrlRewriteTransport(httpx.HTTPTransport):
-    """Transport that rewrites URLs to redirect to UiPath gateway."""
-
-    def __init__(
-        self,
-        gateway_url: str,
-        verify: bool = True,
-        header_capture: HeaderCapture | None = None,
-    ):
-        super().__init__(verify=verify)
-        self.gateway_url = gateway_url
-        self.header_capture = header_capture
-
-    def handle_request(self, request: httpx.Request) -> httpx.Response:
-        original_url = str(request.url)
-        new_url = _rewrite_vertex_url(original_url, self.gateway_url)
-        if new_url:
-            # Set streaming header based on original URL before modifying
-            is_streaming = "alt=sse" in original_url
-            request.headers["X-UiPath-Streaming-Enabled"] = (
-                "true" if is_streaming else "false"
-            )
-            # Update host header to match the new URL
-            request.headers["host"] = new_url.host
-            request.url = new_url
-
-        response = super().handle_request(request)
-        if self.header_capture:
-            self.header_capture.set(dict(response.headers))
-
-        return response
-
-
-class _AsyncUrlRewriteTransport(httpx.AsyncHTTPTransport):
-    """Async transport that rewrites URLs to redirect to UiPath gateway."""
-
-    def __init__(
-        self,
-        gateway_url: str,
-        verify: bool = True,
-        header_capture: HeaderCapture | None = None,
-    ):
-        super().__init__(verify=verify)
-        self.gateway_url = gateway_url
-        self.header_capture = header_capture
-
-    async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
-        original_url = str(request.url)
-        new_url = _rewrite_vertex_url(original_url, self.gateway_url)
-        if new_url:
-            # Set streaming header based on original URL before modifying
-            is_streaming = "alt=sse" in original_url
-            request.headers["X-UiPath-Streaming-Enabled"] = (
-                "true" if is_streaming else "false"
-            )
-            # Update host header to match the new URL
-            request.headers["host"] = new_url.host
-            request.url = new_url
-
-        response = await super().handle_async_request(request)
-        if self.header_capture:
-            self.header_capture.set(dict(response.headers))
-
-        return response
-
-
-class UiPathChatVertex(ChatGoogleGenerativeAI):
-    """UiPath Vertex AI Chat model that routes requests through UiPath's LLM Gateway."""
-
-    llm_provider: LLMProvider = LLMProvider.VERTEX
-    api_flavor: APIFlavor = APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT
-
-    _vendor: str = PrivateAttr(default="vertexai")
-    _model_name: str = PrivateAttr()
-    _uipath_token: str = PrivateAttr()
-    _uipath_llmgw_url: Optional[str] = PrivateAttr(default=None)
-    _agenthub_config: Optional[str] = PrivateAttr(default=None)
-    _byo_connection_id: Optional[str] = PrivateAttr(default=None)
-    _retryer: Optional[Retrying] = PrivateAttr(default=None)
-    _aretryer: Optional[AsyncRetrying] = PrivateAttr(default=None)
-
-    @resource_override(
-        resource_identifier="byo_connection_id", resource_type="connection"
-    )
-    def __init__(
-        self,
-        org_id: Optional[str] = None,
-        tenant_id: Optional[str] = None,
-        token: Optional[str] = None,
-        model_name: str = GeminiModels.gemini_2_5_flash,
-        temperature: Optional[float] = None,
-        agenthub_config: Optional[str] = None,
-        byo_connection_id: Optional[str] = None,
-        retryer: Optional[Retrying] = None,
-        aretryer: Optional[AsyncRetrying] = None,
-        **kwargs: Any,
-    ):
-        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
-        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
-        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
-
-        if not org_id:
-            raise ValueError(
-                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
-            )
-        if not tenant_id:
-            raise ValueError(
-                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
-            )
-        if not token:
-            raise ValueError(
-                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
-            )
-
-        uipath_url = self._build_base_url(model_name)
-        headers = self._build_headers(token, agenthub_config, byo_connection_id)
-
-        header_capture = HeaderCapture(name=f"vertex_headers_{id(self)}")
-        client_kwargs = get_httpx_client_kwargs()
-        verify = client_kwargs.get("verify", True)
-
-        http_options = genai_types.HttpOptions(
-            httpx_client=httpx.Client(
-                transport=_UrlRewriteTransport(
-                    uipath_url, verify=verify, header_capture=header_capture
-                ),
-                headers=headers,
-                **client_kwargs,
-            ),
-            httpx_async_client=httpx.AsyncClient(
-                transport=_AsyncUrlRewriteTransport(
-                    uipath_url, verify=verify, header_capture=header_capture
-                ),
-                headers=headers,
-                **client_kwargs,
-            ),
-        )
-
-        if temperature is None and (
-            "gemini-3" in model_name or "gemini-2" in model_name
-        ):
-            temperature = 1.0
-
-        super().__init__(
-            model=model_name,
-            google_api_key="uipath-gateway",
-            temperature=temperature,
-            max_retries=1,
-            **kwargs,
-        )
-
-        custom_client = google.genai.Client(
-            api_key="uipath-gateway",
-            http_options=http_options,
-        )
-
-        object.__setattr__(self, "client", custom_client)
-
-        self._model_name = model_name
-        self._uipath_token = token
-        self._uipath_llmgw_url = uipath_url
-        self._agenthub_config = agenthub_config
-        self._byo_connection_id = byo_connection_id
-        self._retryer = retryer
-        self._aretryer = aretryer
-        self._header_capture = header_capture
-
-        if self.temperature is not None and not 0 <= self.temperature <= 2.0:
-            raise ValueError("temperature must be in the range [0.0, 2.0]")
-
-        if self.top_p is not None and not 0 <= self.top_p <= 1:
-            raise ValueError("top_p must be in the range [0.0, 1.0]")
-
-        if self.top_k is not None and self.top_k <= 0:
-            raise ValueError("top_k must be positive")
-
-        additional_headers = self.additional_headers or {}
-        self.default_metadata = tuple(additional_headers.items())
-
-    @staticmethod
-    def _build_headers(
-        token: str,
-        agenthub_config: Optional[str] = None,
-        byo_connection_id: Optional[str] = None,
-    ) -> dict[str, str]:
-        """Build HTTP headers for UiPath Gateway requests."""
-        headers = {
-            "Authorization": f"Bearer {token}",
-        }
-        if agenthub_config:
-            headers["X-UiPath-AgentHub-Config"] = agenthub_config
-        if byo_connection_id:
-            headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = byo_connection_id
-        if job_key := os.getenv("UIPATH_JOB_KEY"):
-            headers["X-UiPath-JobKey"] = job_key
-        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
-            headers["X-UiPath-ProcessKey"] = process_key
-        return headers
-
-    @staticmethod
-    def _build_base_url(model_name: str) -> str:
-        """Build the full URL for the UiPath LLM Gateway."""
-        env_uipath_url = os.getenv("UIPATH_URL")
-
-        if not env_uipath_url:
-            raise ValueError("UIPATH_URL environment variable is required")
-
-        vendor_endpoint = EndpointManager.get_vendor_endpoint()
-        formatted_endpoint = vendor_endpoint.format(
-            vendor="vertexai",
-            model=model_name,
-        )
-        return f"{env_uipath_url.rstrip('/')}/{formatted_endpoint}"
-
-    def invoke(self, *args, **kwargs):
-        retryer = self._retryer or _get_default_retryer()
-        return retryer(super().invoke, *args, **kwargs)
-
-    async def ainvoke(self, *args, **kwargs):
-        retryer = self._aretryer or _get_default_async_retryer()
-        return await retryer(super().ainvoke, *args, **kwargs)
-
-    def _merge_finish_reason_to_response_metadata(
-        self, result: ChatResult
-    ) -> ChatResult:
-        """Merge finish_reason from generation_info into AIMessage.response_metadata.
-
-        LangChain's ChatGoogleGenerativeAI stores finish_reason in generation_info
-        but not in AIMessage.response_metadata. This method merges it so that
-        check_stop_reason() in VertexGeminiPayloadHandler can access it.
-        """
-        for generation in result.generations:
-            finish_reason = None
-            if generation.generation_info:
-                finish_reason = generation.generation_info.get("finish_reason")
-
-            if finish_reason and hasattr(generation, "message"):
-                message = generation.message
-                if message.response_metadata is None:
-                    message.response_metadata = {}
-                if "finish_reason" not in message.response_metadata:
-                    message.response_metadata["finish_reason"] = finish_reason
-
-        return result
-
-    def _generate(
-        self,
-        messages: list[BaseMessage],
-        stop: list[str] | None = None,
-        run_manager: CallbackManagerForLLMRun | None = None,
-        **kwargs: Any,
-    ) -> ChatResult:
-        """Generate and ensure finish_reason is in response_metadata."""
-        result = super()._generate(
-            messages, stop=stop, run_manager=run_manager, **kwargs
-        )
-        result = self._merge_finish_reason_to_response_metadata(result)
-        self._header_capture.attach_to_chat_result(result)
-        self._header_capture.clear()
-        return result
-
-    async def _agenerate(
-        self,
-        messages: list[BaseMessage],
-        stop: list[str] | None = None,
-        run_manager: AsyncCallbackManagerForLLMRun | None = None,
-        **kwargs: Any,
-    ) -> ChatResult:
-        """Generate async and ensure finish_reason is in response_metadata."""
-        result = await super()._agenerate(
-            messages, stop=stop, run_manager=run_manager, **kwargs
-        )
-        result = self._merge_finish_reason_to_response_metadata(result)
-        self._header_capture.attach_to_chat_result(result)
-        self._header_capture.clear()
-        return result
-
-    def _stream(
-        self,
-        messages: list[BaseMessage],
-        stop: list[str] | None = None,
-        run_manager: CallbackManagerForLLMRun | None = None,
-        **kwargs: Any,
-    ) -> Iterator[ChatGenerationChunk]:
-        for chunk in super()._stream(
-            messages, stop=stop, run_manager=run_manager, **kwargs
-        ):
-            self._header_capture.attach_to_chat_generation(chunk)
-            yield chunk
-
-        self._header_capture.clear()
-
-    async def _astream(
-        self,
-        messages: list[BaseMessage],
-        stop: list[str] | None = None,
-        run_manager: AsyncCallbackManagerForLLMRun | None = None,
-        **kwargs: Any,
-    ) -> AsyncIterator[ChatGenerationChunk]:
-        async for chunk in super()._astream(
-            messages, stop=stop, run_manager=run_manager, **kwargs
-        ):
-            self._header_capture.attach_to_chat_generation(chunk)
-            yield chunk
-
-        self._header_capture.clear()
-
-
-def _get_default_retryer() -> VertexRetryer:
-    return VertexRetryer(
-        logger=logger,
-    )
-
-
-def _get_default_async_retryer() -> AsyncVertexRetryer:
-    return AsyncVertexRetryer(
-        logger=logger,
-    )
diff --git a/src/uipath_langchain/embeddings/__init__.py b/src/uipath_langchain/embeddings/__init__.py
index e53872627..00b5788d1 100644
--- a/src/uipath_langchain/embeddings/__init__.py
+++ b/src/uipath_langchain/embeddings/__init__.py
@@ -1,6 +1,54 @@
-from .embeddings import UiPathAzureOpenAIEmbeddings, UiPathOpenAIEmbeddings
+"""
+UiPath LangChain Embeddings module.
+
+NOTE: This module uses lazy imports via __getattr__ to avoid loading heavy
+dependencies (langchain_openai, openai SDK) at import time. This significantly
+improves CLI startup performance.
+
+Do NOT add eager imports like:
+    from .models import UiPathOpenAIEmbeddings  # BAD - loads langchain_openai immediately
+
+Instead, all exports are loaded on-demand when first accessed.
+"""
+
+
+def __getattr__(name: str):
+    if name == "UiPathEmbeddings":
+        from uipath_langchain_client.clients.normalized import (
+            UiPathNormalizedEmbeddings,
+        )
+
+        return UiPathNormalizedEmbeddings
+    if name == "UiPathAzureOpenAIEmbeddings":
+        from uipath_langchain_client.clients.openai.embeddings import (
+            UiPathAzureOpenAIEmbeddings,
+        )
+
+        return UiPathAzureOpenAIEmbeddings
+    if name == "UiPathOpenAIEmbeddings":
+        from uipath_langchain_client.clients.openai.embeddings import (
+            UiPathOpenAIEmbeddings,
+        )
+
+        return UiPathOpenAIEmbeddings
+    if name == "UiPathGoogleGenerativeAIEmbeddings":
+        from uipath_langchain_client.clients.google.embeddings import (
+            UiPathGoogleGenerativeAIEmbeddings,
+        )
+
+        return UiPathGoogleGenerativeAIEmbeddings
+    if name == "UiPathBedrockEmbeddings":
+        from uipath_langchain_client.clients.bedrock.embeddings import (
+            UiPathBedrockEmbeddings,
+        )
+
+        return UiPathBedrockEmbeddings
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+

 __all__ = [
     "UiPathAzureOpenAIEmbeddings",
     "UiPathOpenAIEmbeddings",
+    "UiPathGoogleGenerativeAIEmbeddings",
+    "UiPathBedrockEmbeddings",
 ]
diff --git a/src/uipath_langchain/embeddings/embeddings.py b/src/uipath_langchain/embeddings/embeddings.py
deleted file mode 100644
index aca8adaf9..000000000
--- a/src/uipath_langchain/embeddings/embeddings.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import os
-from typing import Any
-
-import httpx
-from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
-from pydantic import Field
-from uipath._utils._ssl_context import get_httpx_client_kwargs
-from uipath.utils import EndpointManager
-
-from uipath_langchain._utils._request_mixin import UiPathRequestMixin
-
-
-class UiPathAzureOpenAIEmbeddings(UiPathRequestMixin, AzureOpenAIEmbeddings):
-    """Custom Embeddings connector for LangChain integration with UiPath.
-
-    This class modifies the OpenAI client to:
-    - Use UiPath endpoints
-    - Log request/response durations
-    - Apply custom URL preparation and header building
-    """
-
-    model_name: str | None = Field(
-        default_factory=lambda: os.getenv(
-            "UIPATH_MODEL_NAME", "text-embedding-3-large"
-        ),
-        alias="model",
-    )
-
-    def __init__(self, **kwargs):
-        default_client_kwargs = get_httpx_client_kwargs()
-        client_kwargs = {
-            **default_client_kwargs,
-            "event_hooks": {
-                "request": [self._log_request_duration],
-                "response": [self._log_response_duration],
-            },
-        }
-        aclient_kwargs = {
-            **default_client_kwargs,
-            "event_hooks": {
-                "request": [self._alog_request_duration],
-                "response": [self._alog_response_duration],
-            },
-        }
-        super().__init__(
-            http_client=httpx.Client(**client_kwargs),
-            http_async_client=httpx.AsyncClient(**aclient_kwargs),
-            **kwargs,
-        )
-        # Monkey-patch the OpenAI client to use your custom methods
-        self.client._client._prepare_url = self._prepare_url
-        self.client._client._build_headers = self._build_headers
-        self.async_client._client._prepare_url = self._prepare_url
-        self.async_client._client._build_headers = self._build_headers
-
-    @property
-    def endpoint(self) -> str:
-        endpoint = EndpointManager.get_embeddings_endpoint()
-        return endpoint.format(
-            model=self.model_name, api_version=self.openai_api_version
-        )
-
-
-class UiPathOpenAIEmbeddings(UiPathRequestMixin, OpenAIEmbeddings):
-    """Custom Embeddings connector for LangChain integration with UiPath.
-
-    This implementation uses custom _call and _acall methods for full control
-    over the API request/response cycle.
-    """
-
-    model_name: str | None = Field(
-        default_factory=lambda: os.getenv(
-            "UIPATH_MODEL_NAME", "text-embedding-3-large"
-        ),
-        alias="model",
-    )
-
-    # Add instance variables for tracking if needed
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self._total_tokens = 0
-        self._total_requests = 0
-
-    def embed_documents(
-        self, texts: list[str], chunk_size: int | None = None, **kwargs: Any
-    ) -> list[list[float]]:
-        """Embed a list of documents using UiPath endpoint.
-
-        Args:
-            texts: List of texts to embed
-            chunk_size: Number of texts to process in each batch
-            **kwargs: Additional arguments passed to the API
-
-        Returns:
-            List of embeddings for each text
-        """
-        chunk_size_ = chunk_size or self.chunk_size
-        embeddings: list[list[float]] = []
-
-        for i in range(0, len(texts), chunk_size_):
-            chunk = texts[i : i + chunk_size_]
-
-            # Build payload matching OpenAI API format
-            payload: dict[str, Any] = {
-                "input": chunk,
-                "model": self.model,
-            }
-
-            # Add optional parameters
-            if self.dimensions is not None:
-                payload["dimensions"] = self.dimensions
-
-            # Add model_kwargs and any additional kwargs
-            payload.update(self.model_kwargs)
-            payload.update(kwargs)
-
-            # Make the API call using custom _call method
-            response = self._call(self.url, payload, self.auth_headers)
-
-            # Extract embeddings
-            chunk_embeddings = [r["embedding"] for r in response["data"]]
-            embeddings.extend(chunk_embeddings)
-
-            # Track usage internally (optional)
-            if "usage" in response:
-                self._total_tokens += response["usage"].get("total_tokens", 0)
-            self._total_requests += 1
-
-        return embeddings
-
-    async def aembed_documents(
-        self,
-        texts: list[str],
-        chunk_size: int | None = None,
-        **kwargs: Any,
-    ) -> list[list[float]]:
-        """Async version of embed_documents.
-
-        Args:
-            texts: List of texts to embed
-            chunk_size: Number of texts to process in each batch
-            **kwargs: Additional arguments passed to the API
-
-        Returns:
-            List of embeddings for each text
-        """
-        chunk_size_ = chunk_size or self.chunk_size
-        embeddings: list[list[float]] = []
-
-        for i in range(0, len(texts), chunk_size_):
-            chunk = texts[i : i + chunk_size_]
-
-            # Build payload matching OpenAI API format
-            payload: dict[str, Any] = {
-                "input": chunk,
-                "model": self.model,
-            }
-
-            # Add optional parameters
-            if self.dimensions is not None:
-                payload["dimensions"] = self.dimensions
-
-            # Add model_kwargs and any additional kwargs
-            payload.update(self.model_kwargs)
-            payload.update(kwargs)
-
-            # Make the async API call using custom _acall method
-            response = await self._acall(self.url, payload, self.auth_headers)
-
-            # Extract embeddings
-            chunk_embeddings = [r["embedding"] for r in response["data"]]
-            embeddings.extend(chunk_embeddings)
-
-            # Track usage internally (optional)
-            if "usage" in response:
-                self._total_tokens += response["usage"].get("total_tokens", 0)
-            self._total_requests += 1
-
-        return embeddings
-
-    @property
-    def endpoint(self) -> str:
-        """Get the UiPath endpoint for embeddings."""
-        endpoint = EndpointManager.get_embeddings_endpoint()
-        return endpoint.format(
-            model=self.model_name, api_version=self.openai_api_version
-        )
-
-    @property
-    def url(self) -> str:
-        """Get the full URL for API requests."""
-        return self.endpoint
-
-    @property
-    def auth_headers(self) -> dict[str, str]:
-        """Get authentication headers for API requests."""
-        headers = {}
-        if self.openai_api_key:
-            headers["Authorization"] = (
-                f"Bearer {self.openai_api_key.get_secret_value()}"
-            )
-        if self.default_headers:
-            headers.update(self.default_headers)
-        return headers
-
-    def get_usage_stats(self) -> dict[str, int]:
-        """Get token usage statistics.
-
-        Returns:
-            Dictionary with total_tokens and total_requests
-        """
-        return {
-            "total_tokens": self._total_tokens,
-            "total_requests": self._total_requests,
-        }
diff --git a/testcases/chat-models/src/main.py b/testcases/chat-models/src/main.py
index 97c7ad6a4..12b929ce4 100644
--- a/testcases/chat-models/src/main.py
+++ b/testcases/chat-models/src/main.py
@@ -1,16 +1,22 @@
 import logging
 from typing import Any, Callable, Literal, Optional

+from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import HumanMessage
 from langchain_core.tools import tool
 from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import END, START, StateGraph, MessagesState
+from langgraph.graph import END, START, MessagesState, StateGraph
 from pydantic import BaseModel, Field
-from langchain_core.language_models import BaseChatModel
-from uipath_langchain.chat.bedrock import UiPathChatBedrock, UiPathChatBedrockConverse
-from uipath_langchain.chat.vertex import UiPathChatVertex
-from uipath_langchain.chat import UiPathChatOpenAI, UiPathChat, UiPathAzureChatOpenAI
+from uipath_langchain.chat import (
+    UiPathAzureChatOpenAI,
+    UiPathChat,
+    UiPathChatAnthropic,
+    UiPathChatAnthropicVertex,
+    UiPathChatBedrock,
+    UiPathChatBedrockConverse,
+    UiPathChatGoogleGenerativeAI,
+)

 logger = logging.getLogger(__name__)

@@ -18,12 +24,28 @@
 def create_test_models(max_tokens: int = 100) -> list[tuple[str, Any]]:
     """Create all test chat models with the specified max_tokens."""
     return [
-        ("UiPathChatOpenAI", UiPathChatOpenAI(use_responses_api=True)),
-        ("UiPathChatVertex", UiPathChatVertex()),
-        ("UiPathChatBedrockConverse", UiPathChatBedrockConverse()),
-        ("UiPathChatBedrock", UiPathChatBedrock()),
-        ("UiPathChat", UiPathChat()),
-        ("UiPathAzureChatOpenAI", UiPathAzureChatOpenAI())
+        ("UiPathChat", UiPathChat(model="gpt-4o-2024-11-20")),
+        ("UiPathAzureChatOpenAI", UiPathAzureChatOpenAI(model="gpt-4o-2024-11-20")),
+        (
+            "UiPathChatBedrock",
+            UiPathChatBedrock(model="anthropic.claude-haiku-4-5-20251001-v1:0"),
+        ),
+        (
+            "UiPathChatBedrockConverse",
+            UiPathChatBedrockConverse(model="anthropic.claude-haiku-4-5-20251001-v1:0"),
+        ),
+        (
+            "UiPathChatGoogleGenerativeAI",
+            UiPathChatGoogleGenerativeAI(model="gemini-2.5-flash"),
+        ),
+        (
+            "UiPathChatAnthropic",
+            UiPathChatAnthropic(model="anthropic.claude-haiku-4-5-20251001-v1:0"),
+        ),
+        (
+            "UiPathChatAnthropicVertex",
+            UiPathChatAnthropicVertex(model="claude-haiku-4-5@20251001"),
+        ),
     ]

@@ -42,7 +64,9 @@ def format_error_message(error: str, max_length: int = 60) -> str:

 @tool
-def get_weather(location: str, unit: Literal["celsius", "fahrenheit"] = "celsius") -> str:
+def get_weather(
+    location: str, unit: Literal["celsius", "fahrenheit"] = "celsius"
+) -> str:
     """Get the current weather for a location.

     Args:
@@ -68,6 +92,7 @@ def calculate(expression: str) -> str:

 class PersonInfo(BaseModel):
     """Information about a person."""
+
     name: str = Field(description="The person's full name")
     age: int = Field(description="The person's age in years")
     city: str = Field(description="The city where the person lives")
@@ -75,15 +100,16 @@

 class TestResult:
     """Accumulates test metrics across all test runs."""
+
     def __init__(self):
         self.chunks = 0
         self.content_length = 0
         self.tool_calls = 0

     def add_response(self, response: Any) -> None:
-        if hasattr(response, 'content') and response.content:
+        if hasattr(response, "content") and response.content:
             self.content_length += len(response.content)
-        if hasattr(response, 'tool_calls') and response.tool_calls:
+        if hasattr(response, "tool_calls") and response.tool_calls:
             self.tool_calls += len(response.tool_calls)

     def add_chunks(self, count: int) -> None:
@@ -121,14 +147,15 @@ async def run_test_method(

 class GraphInput(BaseModel):
     """Input model for the testing graph."""
+
     prompt: str = Field(
-        default="Count from 1 to 5.",
-        description="The prompt to send to the LLM"
+        default="Count from 1 to 5.", description="The prompt to send to the LLM"
     )


 class GraphOutput(BaseModel):
     """Output model for the testing graph."""
+
     success: bool
     result_summary: str
     chunks_received: Optional[int] = None
@@ -138,6 +165,7 @@ class GraphOutput(BaseModel):

 class GraphState(MessagesState):
     """State model for the testing workflow."""
+
     prompt: str
     success: bool
     result_summary: str
@@ -178,7 +206,7 @@ async def test_single_model_all(
         ("invoke", False, False),
         ("ainvoke", True, False),
         ("stream", False, True),
-        ("astream", True, True)
+        ("astream", True, True),
     ]

     for method_name, is_async, is_streaming in test_methods:
@@ -193,7 +221,7 @@ async def test_single_model_all(
             model_results[method_name] = "✓"

     # Test tool calling
-    logger.info(f"  Testing tool_calling...")
+    logger.info("  Testing tool_calling...")
     try:
         llm_with_tools = model.bind_tools(tools)
         chunks = []
@@ -204,20 +232,24 @@ async def test_single_model_all(

         for chunk in chunks:
             accumulated = chunk if accumulated is None else accumulated + chunk

-        if accumulated and hasattr(accumulated, 'tool_calls') and accumulated.tool_calls:
+        if (
+            accumulated
+            and hasattr(accumulated, "tool_calls")
+            and accumulated.tool_calls
+        ):
             tool_calls_count = len(accumulated.tool_calls)
             result.add_tool_calls(tool_calls_count)
             logger.info(f"  Tool calls detected: {tool_calls_count}")
             model_results["tool_calling"] = f"✓ ({tool_calls_count} calls)"
         else:
-            logger.warning(f"  No tool calls detected")
+            logger.warning("  No tool calls detected")
             model_results["tool_calling"] = "✗ No tool calls detected"
     except Exception as e:
         logger.error(f"  Tool calling failed: {e}")
         model_results["tool_calling"] = f"✗ {format_error_message(str(e))}"

     # Test structured output
-    logger.info(f"  Testing structured_output...")
+    logger.info("  Testing structured_output...")
     try:
         llm_with_structure = model.with_structured_output(PersonInfo)
         response = await llm_with_structure.ainvoke(structured_messages)
@@ -248,18 +280,28 @@
 async def run_all_tests(state: GraphState) -> dict:
     """Run all tests for all chat models in parallel."""
     import asyncio

-    logger.info("="*80)
+    logger.info("=" * 80)
     logger.info("Running All Tests")
-    logger.info("="*80)
+    logger.info("=" * 80)

     models = create_test_models(max_tokens=2000)
     tools = [get_weather, calculate]
-    tool_messages = [HumanMessage(content="What's the weather in San Francisco? Also calculate 15 * 23.")]
-    structured_messages = [HumanMessage(content="Tell me about John Smith, a 35 year old software engineer living in New York.")]
+    tool_messages = [
+        HumanMessage(
+            content="What's the weather in San Francisco? Also calculate 15 * 23."
+        )
+    ]
+    structured_messages = [
+        HumanMessage(
+            content="Tell me about John Smith, a 35 year old software engineer living in New York."
+        )
+    ]

     # Run all models in parallel
     tasks = [
-        test_single_model_all(name, model, state["messages"], tools, tool_messages, structured_messages)
+        test_single_model_all(
+            name, model, state["messages"], tools, tool_messages, structured_messages
+        )
         for name, model in models
     ]
     results_list = await asyncio.gather(*tasks)
@@ -275,17 +317,34 @@
         total_result.tool_calls += result.tool_calls

     # Build summary
-    logger.info("="*80)
+    logger.info("=" * 80)
     summary_lines = []
-    for model_name in ["UiPathChatOpenAI", "UiPathChatVertex", "UiPathChatBedrockConverse", "UiPathChatBedrock", "UiPathChat", "UiPathAzureChatOpenAI"]:
+    for model_name in [
+        "UiPathChat",
+        "UiPathAzureChatOpenAI",
+        "UiPathChatBedrock",
+        "UiPathChatBedrockConverse",
+        "UiPathChatGoogleGenerativeAI",
+        "UiPathChatAnthropic",
+        "UiPathChatAnthropicVertex",
+    ]:
         if model_name in all_model_results:
             summary_lines.append(f"{model_name}:")
             results = all_model_results[model_name]
-            for test_name in ["invoke", "ainvoke", "stream", "astream", "tool_calling", "structured_output"]:
+            for test_name in [
+                "invoke",
+                "ainvoke",
+                "stream",
+                "astream",
+                "tool_calling",
+                "structured_output",
+            ]:
                 if test_name in results:
                     summary_lines.append(f"  {test_name}: {results[test_name]}")

-    has_failures = any("✗" in str(v) for r in all_model_results.values() for v in r.values())
+    has_failures = any(
+        "✗" in str(v) for r in all_model_results.values() for v in r.values()
+    )

     return {
         "success": not has_failures,
@@ -299,16 +358,16 @@

 async def return_results(state: GraphState) -> GraphOutput:
     """Return final test results."""
-    logger.info("="*80)
+    logger.info("=" * 80)
     logger.info("TEST RESULTS")
-    logger.info("="*80)
+    logger.info("=" * 80)
     logger.info(f"Success: {state['success']}")
     logger.info(f"Summary: {state['result_summary']}")
-    if state.get('chunks_received'):
+    if state.get("chunks_received"):
         logger.info(f"Chunks Received: {state['chunks_received']}")
-    if state.get('content_length'):
+    if state.get("content_length"):
         logger.info(f"Content Length: {state['content_length']}")
-    if state.get('tool_calls_count'):
+    if state.get("tool_calls_count"):
         logger.info(f"Tool Calls: {state['tool_calls_count']}")

     return GraphOutput(
diff --git a/testcases/company-research-agent/src/graph.py b/testcases/company-research-agent/src/graph.py
index 3dadbe28a..08ec4948c 100644
--- a/testcases/company-research-agent/src/graph.py
+++ b/testcases/company-research-agent/src/graph.py
@@ -2,7 +2,6 @@
 from langchain_community.tools import DuckDuckGoSearchResults
 from langgraph.graph import END, START, MessagesState, StateGraph
 from pydantic import BaseModel
-
 from uipath_langchain.chat import UiPathChat

 # Configuration constants
@@ -40,7 +39,7 @@ def get_search_tool() -> DuckDuckGoSearchResults:

 def create_llm() -> UiPathChat:
     """Create and configure the language model."""
-    return UiPathChat(streaming=False)
+    return UiPathChat(model="gpt-4o-2024-11-20", streaming=False)


 def create_research_agent():
diff --git a/testcases/ticket-classification/src/main.py b/testcases/ticket-classification/src/main.py
index e0211f890..4d1dc954e 100644
--- a/testcases/ticket-classification/src/main.py
+++ b/testcases/ticket-classification/src/main.py
@@ -7,12 +7,11 @@
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_core.output_parsers import PydanticOutputParser
 from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import END, START, StateGraph, MessagesState
+from langgraph.graph import END, START, MessagesState, StateGraph
 from langgraph.types import Command, interrupt
 from pydantic import BaseModel, Field
-
 from uipath.platform import UiPath
-from uipath.platform.common import CreateTask
+
 from uipath_langchain.chat import UiPathChat

 # Configuration
@@ -27,9 +26,11 @@
 TicketCategory = Literal["security", "error", "system", "billing", "performance"]
 NextNode = Literal["classify", "notify_team"]

+
 # Data Models
 class GraphInput(BaseModel):
     """Input model for the ticket classification graph."""
+
     message: str
     ticket_id: str
     assignee: str | None = None
@@ -37,12 +38,14 @@ class GraphInput(BaseModel):

 class GraphOutput(BaseModel):
     """Output model for the ticket classification graph."""
+
     label: str
     confidence: float


 class GraphState(MessagesState):
     """State model for the ticket classification workflow."""
+
     message: str
     ticket_id: str
     assignee: str | None
@@ -54,6 +57,7 @@ class GraphState(MessagesState):

 class TicketClassification(BaseModel):
     """Model for ticket classification results."""
+
     label: TicketCategory = Field(
         description="The classification label for the support ticket"
     )
@@ -90,6 +94,7 @@ def create_system_message() -> str:
         format_instructions=output_parser.get_format_instructions()
     )

+
 # Node Functions
 def prepare_input(graph_input: GraphInput) -> GraphState:
     """Prepare the initial state from graph input."""
@@ -99,7 +104,7 @@ def prepare_input(graph_input: GraphInput) -> GraphState:
         assignee=graph_input.assignee,
         messages=[
             SystemMessage(content=create_system_message()),
-            HumanMessage(content=graph_input.message)
+            HumanMessage(content=graph_input.message),
         ],
         last_predicted_category=None,
         human_approval=None,
@@ -112,9 +117,10 @@ def decide_next_node(state: GraphState) -> NextNode:
         return "notify_team"
     return "classify"

+
 async def classify(state: GraphState) -> Command:
     """Classify the support ticket using LLM."""
-    llm = UiPathChat()
+    llm = UiPathChat(model="gpt-4o-mini-2024-07-18")

     # Add rejection message if there was a previous prediction
     if state.get("last_predicted_category"):
@@ -151,7 +157,10 @@ async def classify(state: GraphState) -> Command:
         }
     )

-def create_approval_message(ticket_id: str, ticket_message: str, label: str, confidence: float) -> str:
+
+def create_approval_message(
+    ticket_id: str, ticket_message: str, label: str, confidence: float
+) -> str:
     """Create formatted message for human approval."""
     return (
         f"This is how I classified the ticket: '{ticket_id}', "
@@ -170,8 +179,6 @@ async def wait_for_human(state: GraphState) -> Command:
     confidence = state["confidence"]

     is_resume = state.get("human_approval") is not None
-
-
     if not is_resume:
         logger.info("Waiting for human approval via regular interrupt")
         interrupt_message = (
@@ -187,6 +194,7 @@
         }
     )

+
 async def notify_team(state: GraphState) -> GraphOutput:
     """Send team notification and return final output."""
     logger.info("Sending team email notification")
diff --git a/tests/agent/react/test_create_agent.py b/tests/agent/react/test_create_agent.py
index 0ea8e3253..81c2bdb8c 100644
--- a/tests/agent/react/test_create_agent.py
+++ b/tests/agent/react/test_create_agent.py
@@ -21,28 +21,16 @@
 from uipath_langchain.agent.react.types import (
     AgentGraphConfig,
     AgentGraphNode,
-    AgentSettings,
 )
-from uipath_langchain.chat.types import APIFlavor, LLMProvider


 def _make_mock_model() -> MagicMock:
-    """Create a mock chat model that satisfies UiPathPassthroughChatModel protocol."""
-    model = MagicMock(spec=BaseChatModel)
-    model.llm_provider = LLMProvider.OPENAI
-    model.api_flavor = APIFlavor.OPENAI_RESPONSES
-
-    # Protocol check: make isinstance(model, UiPathPassthroughChatModel) return True
-    from uipath_langchain.chat.types import UiPathPassthroughChatModel
+    """Create a mock chat model for testing.

-    model.__class__ = type(
-        "MockUiPathModel",
-        (UiPathPassthroughChatModel,),
-        {
-            "llm_provider": property(lambda self: LLMProvider.OPENAI),
-            "api_flavor": property(lambda self: APIFlavor.OPENAI_RESPONSES),
-        },
-    )
+    Returns a simple BaseChatModel mock. Since it's not a UiPathBaseLLMClient,
+    the agent will set agent_settings=None.
+    """
+    model = MagicMock(spec=BaseChatModel)
     return model

@@ -167,10 +155,7 @@ def test_autonomous_agent_with_tools(
         messages,
         None,  # input schema
         False,  # is_conversational
-        AgentSettings(
-            llm_provider=mock_model.llm_provider,
-            api_flavor=mock_model.api_flavor,
-        ),
+        None,  # agent_settings (None for non-UiPathBaseLLMClient models)
     )
     mock_create_terminate_node.assert_called_once_with(
         None,  # output schema
@@ -266,10 +251,7 @@
         messages,
         None,  # input schema
         True,  # is_conversational
-        AgentSettings(
-            llm_provider=mock_model.llm_provider,
-            api_flavor=mock_model.api_flavor,
-        ),
+        None,  # agent_settings (None for non-UiPathBaseLLMClient models)
     )
     mock_create_terminate_node.assert_called_once_with(
         None,  # output schema
diff --git a/tests/agent/react/test_llm_node.py b/tests/agent/react/test_llm_node.py
index 6420e89f2..f7b7bffe2 100644
--- a/tests/agent/react/test_llm_node.py
+++ b/tests/agent/react/test_llm_node.py
@@ -11,7 +11,6 @@

 from uipath_langchain.agent.react.llm_node import create_llm_node
 from uipath_langchain.agent.react.types import AgentGraphState
-from uipath_langchain.chat.types import APIFlavor, LLMProvider


 class TestLLMNodeParallelToolCalls:
@@ -101,13 +100,10 @@ def setup_method(self):
         self.regular_tool = Mock(spec=BaseTool)
         self.regular_tool.name = "regular_tool"

-        # Create mock chat model that implements UiPathPassthroughChatModel
+        # Create a mock chat model (a plain BaseChatModel, not a UiPathBaseLLMClient)
         self.mock_model = Mock(spec=BaseChatModel)
         self.mock_model.bind_tools.return_value = self.mock_model
         self.mock_model.bind.return_value = self.mock_model
-        # Add UiPath protocol properties
-        self.mock_model.llm_provider = LLMProvider.OPENAI
-        self.mock_model.api_flavor = APIFlavor.OPENAI_RESPONSES

         # Create test state
         self.test_state = AgentGraphState(messages=[HumanMessage(content="Test query")])
diff --git a/tests/chat/__init__.py b/tests/chat/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tests/chat/retryers/test_retryers.py b/tests/chat/retryers/test_retryers.py
deleted file mode 100644
index 30a787d30..000000000
--- a/tests/chat/retryers/test_retryers.py
+++ /dev/null
@@ -1,274 +0,0 @@
-"""Unit tests for retry-after-aware retry strategy."""
-
-from typing import NoReturn, cast
-from unittest.mock import MagicMock, patch
-
-import botocore.exceptions
-import httpx
-from google.genai import errors as genai_errors
-from langchain_google_genai.chat_models import ChatGoogleGenerativeAIError
-from tenacity import wait_none
-
-from uipath_langchain.chat.retryers.bedrock import (
-    AsyncBedrockRetryer,
-)
-from uipath_langchain.chat.retryers.vertex import (
-    AsyncVertexRetryer,
-    VertexRetryer,
-)
-
-
-def raise_boto3_error(
-    error_code: str,
-    message: str,
-    status_code: int = 429,
-    headers: dict[str, str] | None = None,
-) -> NoReturn:
-    """Raise a botocore ClientError for testing."""
-    error_response_dict = {
-        "Error": {
-            "Code": error_code,
-            "Message": message,
-        },
-        "ResponseMetadata": {
-            "HTTPStatusCode": status_code,
-            "HTTPHeaders": headers or {},
-        },
-    }
-    raise botocore.exceptions.ClientError(
-        cast("botocore.exceptions._ClientErrorResponseTypeDef", error_response_dict),
-        "test_operation",
-    )
-
-
-def raise_google_genai_error(
-    status_code: int = 429,
-    message: str = "Resource exhausted",
-    headers: dict[str, str] | None = None,
-) -> NoReturn:
-    """Raise a ChatGoogleGenerativeAIError wrapping google.genai.errors.ClientError."""
-    response = MagicMock()
-    response.status_code = status_code
-    response.headers = headers or {}
-    response.text = message
-
-    response_json = {"error": {"message": message}}
-
-    client_error = genai_errors.ClientError(
-        code=status_code,
-        response_json=response_json,
-        response=response,
-    )
-    genai_error = ChatGoogleGenerativeAIError(message)
-    genai_error.__cause__ = client_error
-    raise genai_error
-
-
-class TestVertexRetryStrategy:
-    """Tests for VertexRetryStrategy and AsyncVertexRetryStrategy classes."""
-
-    def test_retry_strategy_retries_on_exception(
-        self,
-    ) -> None:
-        call_count = 0
-
-        def failing_function():
-            nonlocal call_count
-            call_count += 1
-            if call_count < 3:
-                raise_google_genai_error(429)
-            return "Success"
-
-        retryer = VertexRetryer(max_retries=5)
-        retryer.wait = wait_none()
-
-        result = retryer(failing_function)
-
-        assert result == "Success"
-        assert call_count == 3  # Failed twice, succeeded on third attempt
-
-    async def test_async_retry_strategy_retries_on_exception(
-        self,
-    ) -> None:
-        call_count = 0
-
-        async def failing_async_function():
-            nonlocal call_count
-            call_count += 1
-            if call_count < 3:
-                raise_google_genai_error(429)
-            return "Success"
-
-        retryer = AsyncVertexRetryer(max_retries=5)
-        retryer.wait = wait_none()
-
-        result: str = await retryer(failing_async_function)
-
-        assert result == "Success"
-        assert call_count == 3  # Failed twice, succeeded on third attempt
-
-    async def test_no_header_but_retryable_status_code_falls_back_to_exponential_backoff(
-        self,
-    ) -> None:
-        call_count = 0
-        sleep_delays: list[float] = []
-
-        async def always_failing_async_function():
-            nonlocal call_count
-            call_count += 1
-            raise_google_genai_error(503)
-
-        retryer = AsyncVertexRetryer(max_retries=4, initial=5.0, max_delay=60.0)
-
-        with patch("asyncio.sleep") as mock_sleep:
-            mock_sleep.side_effect = lambda delay: sleep_delays.append(delay)
-            try:
-                await retryer(always_failing_async_function)
-            except ChatGoogleGenerativeAIError:
-                pass
-
-        assert call_count == 4
-        assert len(sleep_delays) == 3
-
-        # Verify exponential growth: 5s, 10s, 20s (with small jitter tolerance)
-        assert 5.0 <= sleep_delays[0] <= 6.0
-        assert 10.0 <= sleep_delays[1] <= 11.0
-        assert 20.0 <= sleep_delays[2] <= 21.0
-
-    async def test_no_status_code_but_retryable_exception_falls_back_to_exponential_backoff(
-        self,
-    ) -> None:
-        call_count = 0
-        sleep_delays: list[float] = []
-
-        async def always_failing_async_function():
-            nonlocal call_count
-            call_count += 1
-            raise httpx.TimeoutException("Timeout")
-
-        retryer = AsyncVertexRetryer(
-            max_retries=4,
-            initial=5.0,
-            max_delay=60.0,
-        )
-
-        with patch("asyncio.sleep") as mock_sleep:
-            mock_sleep.side_effect = lambda delay: sleep_delays.append(delay)
-            try:
-                await retryer(always_failing_async_function)
-            except httpx.TimeoutException:
-                pass
-
-        assert call_count == 4
-        assert len(sleep_delays) == 3
-
-        # Verify exponential growth: 5s, 10s, 20s (with small jitter tolerance)
-        assert 5.0 <= sleep_delays[0] <= 6.0
-        assert 10.0 <= sleep_delays[1] <= 11.0
-        assert 20.0 <= sleep_delays[2] <= 21.0
-
-    async def test_async_retry_strategy_respects_max_retries(
-        self,
-    ) -> None:
-        """Test that the async retry strategy respects max_retries."""
-        call_count = 0
-
-        async def always_failing_async_function():
-            nonlocal call_count
-            call_count += 1
-            raise_google_genai_error(503)
-
-        retryer = AsyncVertexRetryer(max_retries=3)
-        retryer.wait = wait_none()
-
-        try:
-            await retryer(always_failing_async_function)
-            raise AssertionError("Should have raised an exception")
-        except ChatGoogleGenerativeAIError:
-            pass
-
-        assert call_count == 3  # Should stop after 3 attempts
-
-    async def test_retry_strategy_uses_retry_after_header_with_google_genai(
-        self,
-    ) -> None:
-        call_count = 0
-        sleep_delays: list[float] = []
-
-        async def function_with_retry_after():
-            nonlocal call_count
-            call_count += 1
-            if call_count < 2:
-                raise_google_genai_error(
-                    429,
-                    "Resource exhausted",
-                    headers={"retry-after": "30"},
-                )
-            return "Success"
-
-        retryer = AsyncVertexRetryer(max_retries=3)
-
-        with patch("asyncio.sleep") as mock_sleep:
-            mock_sleep.side_effect = lambda delay: sleep_delays.append(delay)
-            result: str = await retryer(function_with_retry_after)
-
-        assert result == "Success"
-        assert call_count == 2
-        assert len(sleep_delays) == 1
-        assert sleep_delays[0] == 30.0
-
-    async def test_retry_strategy_uses_retry_after_header_with_vertex(self) -> None:
-        call_count = 0
-        sleep_delays: list[float] = []
-
-        async def function_with_retry_after():
-            nonlocal call_count
-            call_count += 1
-            if call_count < 2:
-                raise_google_genai_error(
-                    429,
-                    "Resource exhausted",
-                    headers={"retry-after": "45"},
-                )
-            return "Success"
-
-        retryer = AsyncVertexRetryer(max_retries=3)
-
-        with patch("asyncio.sleep") as mock_sleep:
-            mock_sleep.side_effect = lambda delay: sleep_delays.append(delay)
-            result: str = await retryer(function_with_retry_after)
-
-        assert result == "Success"
-        assert call_count == 2
-        assert len(sleep_delays) == 1
-        assert sleep_delays[0] == 45.0
-
-
-class TestBedrockRetryStrategy:
-    """Tests for BedrockRetryer and AsyncBedrockRetryer classes."""
-
-    async def test_retry_strategy_uses_retry_after_header_with_bedrock(self) -> None:
-        call_count = 0
-        sleep_delays: list[float] = []
-
-        async def function_with_retry_after():
-            nonlocal call_count
-            call_count += 1
-            if call_count < 2:
-                raise_boto3_error(
-                    "ThrottlingException",
-                    "Rate exceeded",
-                    headers={"retry-after": "25"},
-                )
-            return "Success"
-
-        retryer = AsyncBedrockRetryer(max_retries=3)
-
-        with patch("asyncio.sleep") as mock_sleep:
-            mock_sleep.side_effect = lambda delay: sleep_delays.append(delay)
-            result: str = await retryer(function_with_retry_after)
-
-        assert result == "Success"
-        assert call_count == 2
-        assert len(sleep_delays) == 1
-        assert sleep_delays[0] == 25.0
diff --git a/tests/chat/test_bedrock.py b/tests/chat/test_bedrock.py
deleted file mode 100644
index e60adfd89..000000000
--- a/tests/chat/test_bedrock.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import os
-from unittest.mock import MagicMock, patch
-
-from langchain_aws import ChatBedrock
-from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
-from langchain_core.messages.content import create_file_block
-from langchain_core.outputs import ChatGeneration, ChatResult
-
-from uipath_langchain.chat.bedrock import UiPathChatBedrock
-
-
-class TestConvertFileBlocksToAnthropicDocuments:
-    def test_converts_pdf_file_block_to_document(self):
-        messages: list[BaseMessage] = [
-            HumanMessage(
-                content_blocks=[
-                    {"type": "text", "text": "Summarize this PDF"},
-                    create_file_block(base64="JVBER==", mime_type="application/pdf"),
-                ]
-            )
-        ]
-
-        result = UiPathChatBedrock._convert_file_blocks_to_anthropic_documents(messages)
-
-        assert result[0].content[0] == {"type": "text", "text": "Summarize this PDF"}
-        assert result[0].content[1] == {
-            "type": "document",
-            "source": {
-                "type": "base64",
-                "media_type": "application/pdf",
-                "data": "JVBER==",
-            },
-        }
-
-
-class TestGenerate:
-    @patch.dict(
-        os.environ,
-        {
-            "UIPATH_URL": "https://example.com",
-            "UIPATH_ORGANIZATION_ID": "org",
-            "UIPATH_TENANT_ID": "tenant",
-            "UIPATH_ACCESS_TOKEN": "token",
-        },
-    )
-    @patch("uipath_langchain.chat.bedrock.boto3.client", return_value=MagicMock())
-    def test_generate_converts_file_blocks(self, _mock_boto):
-        chat = UiPathChatBedrock()
-
-        messages: list[BaseMessage] = [
-            HumanMessage(
-                content_blocks=[
-                    {"type": "text", "text": "Summarize this PDF"},
-                    create_file_block(base64="JVBER==", mime_type="application/pdf"),
-                ]
-            )
-        ]
-
-        fake_result = ChatResult(
-            generations=[ChatGeneration(message=AIMessage(content="Summary"))]
-        )
-
-        with patch.object(
-            ChatBedrock, "_generate", return_value=fake_result
-        ) as mock_parent_generate:
-            result = chat._generate(messages)
-
-        called_messages = mock_parent_generate.call_args[0][0]
-        assert called_messages[0].content[0] == {
-            "type": "text",
-            "text": "Summarize this PDF",
-        }
-        assert called_messages[0].content[1] == {
-            "type": "document",
-            "source": {
-                "type": "base64",
-                "media_type": "application/pdf",
-                "data": "JVBER==",
-            },
-        }
-        assert result == fake_result
diff --git a/tests/chat/test_openai_url_rewrite.py b/tests/chat/test_openai_url_rewrite.py
deleted file mode 100644
index 89fd0a53f..000000000
--- a/tests/chat/test_openai_url_rewrite.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import httpx
-
-from uipath_langchain.chat.openai import _rewrite_openai_url
-
-
-class TestRewriteOpenAIUrl:
-    """Tests for the _rewrite_openai_url function."""
-
-    def test_rewrite_deployments_url(self):
-        """Test rewriting URLs with /openai/deployments/ pattern (responses: false)."""
-        original_url = "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/openai/deployments/gpt-5-mini-2025-08-07/chat/completions?api-version=2024-12-01-preview"
-        params = httpx.QueryParams({"api-version": "2024-12-01-preview"})
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert (
-            str(result)
-            == "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/completions?api-version=2024-12-01-preview"
-        )
-
-    def test_rewrite_responses_url(self):
-        """Test rewriting URLs with /openai/responses pattern (responses: true)."""
-        original_url = "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/openai/responses?api-version=2024-12-01-preview"
-        params = httpx.QueryParams({"api-version": "2024-12-01-preview"})
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert (
-            str(result)
-            == "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/completions?api-version=2024-12-01-preview"
-        )
-
-    def test_rewrite_base_url_with_query_params(self):
-        """Test rewriting base URL with query params (responses API base case)."""
-        original_url = "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07?api-version=2024-12-01-preview"
-        params = httpx.QueryParams({"api-version": "2024-12-01-preview"})
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert (
-            str(result)
-            == "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/completions?api-version=2024-12-01-preview"
-        )
-
-    def test_rewrite_without_query_params(self):
-        """Test rewriting URL without query parameters."""
-        original_url = "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/openai/responses"
-        params = httpx.QueryParams()
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert (
-            str(result)
-            == "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/completions"
-        )
-
-    def test_rewrite_localhost_url(self):
-        """Test rewriting localhost URL."""
-        original_url = "https://localhost:7024/account/tenant/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/openai/deployments/gpt-5-mini-2025-08-07/chat/completions"
-        params = httpx.QueryParams()
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert (
-            str(result)
-            == "https://localhost:7024/account/tenant/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/completions"
-        )
-
-    def test_rewrite_preserves_different_api_versions(self):
-        """Test that different api-version values are preserved."""
-        original_url = "https://cloud.uipath.com/account/tenant/agenthub_/llm/raw/vendor/openai/model/gpt-5-mini-2025-08-07/openai/responses?api-version=2025-04-01-preview"
-        params = httpx.QueryParams({"api-version": "2025-04-01-preview"})
-
-        result = _rewrite_openai_url(original_url, params)
-
-        assert result is not None
-        assert "api-version=2025-04-01-preview" in str(result)
diff --git a/tests/cli/mocks/joke_agent_with_guardrails.py b/tests/cli/mocks/joke_agent_with_guardrails.py
index 059f41f1c..623407a28 100644
--- a/tests/cli/mocks/joke_agent_with_guardrails.py
+++ b/tests/cli/mocks/joke_agent_with_guardrails.py
@@ -23,7 +23,7 @@
 from uipath.core.guardrails.guardrails import FieldReference, FieldSource
 from uipath.platform.guardrails.guardrails import NumberParameterValue

-from uipath_langchain.chat.openai import UiPathChatOpenAI
+from uipath_langchain.chat import UiPathAzureChatOpenAI


 # Mock Sentence Analyzer Tool
@@ -75,7 +75,8 @@ class AgentOutput(BaseModel):
 all_tools = [sentence_analyzer_tool]

 # Create LLM (will be mocked in tests)
-llm = UiPathChatOpenAI(
+llm = UiPathAzureChatOpenAI(
+    model="gpt-4o-mini-2024-07-18",
     temperature=0.0,
     max_tokens=500,
     use_responses_api=True,
diff --git a/tests/runtime/test_graph.py b/tests/runtime/test_graph.py
index 006057d59..d16d03743 100644
--- a/tests/runtime/test_graph.py
+++ b/tests/runtime/test_graph.py
@@ -24,12 +24,7 @@ def test_agent_graph_schema():
     # Setup
     movie_system_prompt = """You are an advanced AI assistant
     specializing in movie research and analysis."""
-    llm = UiPathChat(
-        model="claude-3-7-sonnet-latest",
-        access_token="test-token",
-        azure_endpoint="test-base-url",
-        client_id="test-client-id",
-    )
+    llm = UiPathChat(model="claude-3-7-sonnet-latest")
     graph: CompiledStateGraph[Any, Any, Any, Any] = create_agent(
         llm, tools=[search_movies], system_prompt=movie_system_prompt
     )
@@ -113,12 +108,7 @@
     class Router(TypedDict):
         next: Literal["researcher", "coder", "FINISH"]

-    llm = UiPathChat(
-        model="claude-3-7-sonnet-latest",
-        access_token="test-token",
-        azure_endpoint="test-base-url",
-        client_id="test-client-id",
-    )
+    llm = UiPathChat(model="claude-3-7-sonnet-latest")

     class GraphInput(BaseModel):
         question: str
diff --git a/uv.lock b/uv.lock
index 294507674..9d6446a72 100644
--- a/uv.lock
+++ b/uv.lock
@@ -149,6 +149,34 @@
 wheels = [
     { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
 ]

+[[package]]
+name = "anthropic"
+version = "0.79.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anyio" },
+    { name = "distro" },
+    { name = "docstring-parser" },
+    { name = "httpx" },
+    { name = "jiter" },
+    { name = "pydantic" },
+    { name = "sniffio" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/15/b1/91aea3f8fd180d01d133d931a167a78a3737b3fd39ccef2ae8d6619c24fd/anthropic-0.79.0.tar.gz", hash = "sha256:8707aafb3b1176ed6c13e2b1c9fb3efddce90d17aee5d8b83a86c70dcdcca871", size = 509825, upload-time = "2026-02-07T18:06:18.388Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/95/b2/cc0b8e874a18d7da50b0fda8c99e4ac123f23bf47b471827c5f6f3e4a767/anthropic-0.79.0-py3-none-any.whl", hash = "sha256:04cbd473b6bbda4ca2e41dd670fe2f829a911530f01697d0a1e37321eb75f3cf", size = 405918, upload-time = "2026-02-07T18:06:20.246Z" },
+]
+
+[package.optional-dependencies]
+bedrock = [
+    { name = "boto3" },
+    { name = "botocore" },
+]
+vertex = [
+    { name = "google-auth", extra = ["requests"] },
+]
+
 [[package]]
 name = "anyio"
 version = "4.12.1"
@@ -180,6 +208,149 @@
 wheels = [
     { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" },
 ]

+[[package]]
+name = "azure-ai-agents"
+version = "1.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "isodate" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/39/98/bbe2e9e5b0a934be1930545025bf7018ebc4cc33b10134cc3314d6487076/azure_ai_agents-1.1.0.tar.gz", hash = "sha256:eb9d7226282d03206c3fab3f3ee0a2fc71e0ad38e52d2f4f19a92c56ed951aea", size = 303656, upload-time = "2025-08-05T19:02:26.7Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e6/31/43750555bf20d3d2d7589fcd775c96ce7c96e58e208b81c1ed6d4bad6c5f/azure_ai_agents-1.1.0-py3-none-any.whl", hash = "sha256:f660bb0d564aeb88e33140ebc1e4700d2e36e2e12ee60c3346915d702a9310a9", size = 191126, upload-time = "2025-08-05T19:02:28.178Z" },
+]
+
+[[package]]
+name = "azure-ai-inference"
+version = "1.0.0b9"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "isodate" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4e/6a/ed85592e5c64e08c291992f58b1a94dab6869f28fb0f40fd753dced73ba6/azure_ai_inference-1.0.0b9.tar.gz", hash = "sha256:1feb496bd84b01ee2691befc04358fa25d7c344d8288e99364438859ad7cd5a4", size = 182408, upload-time = "2025-02-15T00:37:28.464Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/4f/0f/27520da74769db6e58327d96c98e7b9a07ce686dff582c9a5ec60b03f9dd/azure_ai_inference-1.0.0b9-py3-none-any.whl", hash = "sha256:49823732e674092dad83bb8b0d1b65aa73111fab924d61349eb2a8cdc0493990", size = 124885, upload-time = "2025-02-15T00:37:29.964Z" },
+]
+
+[package.optional-dependencies]
+opentelemetry = [
+    { name = "azure-core-tracing-opentelemetry" },
+]
+
+[[package]]
+name = "azure-ai-projects"
+version = "1.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-ai-agents" },
+    { name = "azure-core" },
+    { name = "azure-storage-blob" },
+    { name = "isodate" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/dd/95/9c04cb5f658c7f856026aa18432e0f0fa254ead2983a3574a0f5558a7234/azure_ai_projects-1.0.0.tar.gz", hash = "sha256:b5f03024ccf0fd543fbe0f5abcc74e45b15eccc1c71ab87fc71c63061d9fd63c", size = 130798, upload-time = "2025-07-31T02:09:27.912Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b5/db/7149cdf71e12d9737f186656176efc94943ead4f205671768c1549593efe/azure_ai_projects-1.0.0-py3-none-any.whl", hash = "sha256:81369ed7a2f84a65864f57d3fa153e16c30f411a1504d334e184fb070165a3fa", size = 115188, upload-time = "2025-07-31T02:09:29.362Z" },
+]
+
+[[package]]
+name = "azure-common"
+version = "1.1.28"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/3e/71/f6f71a276e2e69264a97ad39ef850dca0a04fce67b12570730cb38d0ccac/azure-common-1.1.28.zip", hash = "sha256:4ac0cd3214e36b6a1b6a442686722a5d8cc449603aa833f3f0f40bda836704a3", size = 20914, upload-time = "2022-02-03T19:39:44.373Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/62/55/7f118b9c1b23ec15ca05d15a578d8207aa1706bc6f7c87218efffbbf875d/azure_common-1.1.28-py2.py3-none-any.whl", hash = "sha256:5c12d3dcf4ec20599ca6b0d3e09e86e146353d443e7fcc050c9a19c1f9df20ad", size = 14462, upload-time = "2022-02-03T19:39:42.417Z" },
+]
+
+[[package]]
+name = "azure-core"
+version = "1.38.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "requests" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/53/9b/23893febea484ad8183112c9419b5eb904773adb871492b5fa8ff7b21e09/azure_core-1.38.1.tar.gz", hash = "sha256:9317db1d838e39877eb94a2240ce92fa607db68adf821817b723f0d679facbf6", size = 363323, upload-time = "2026-02-11T02:03:06.051Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/db/88/aaea2ad269ce70b446660371286272c1f6ba66541a7f6f635baf8b0db726/azure_core-1.38.1-py3-none-any.whl", hash = "sha256:69f08ee3d55136071b7100de5b198994fc1c5f89d2b91f2f43156d20fcf200a4", size = 217930, upload-time = "2026-02-11T02:03:07.548Z" },
+]
+
+[[package]]
+name = "azure-core-tracing-opentelemetry"
+version = "1.0.0b12"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "opentelemetry-api" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5a/7f/5de13a331a5f2919417819cc37dcf7c897018f02f83aa82b733e6629a6a6/azure_core_tracing_opentelemetry-1.0.0b12.tar.gz", hash = "sha256:bb454142440bae11fd9d68c7c1d67ae38a1756ce808c5e4d736730a7b4b04144", size = 26010, upload-time = "2025-03-21T00:18:37.346Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/76/5e/97a471f66935e7f89f521d0e11ae49c7f0871ca38f5c319dccae2155c8d8/azure_core_tracing_opentelemetry-1.0.0b12-py3-none-any.whl", hash = "sha256:38fd42709f1cc4bbc4f2797008b1c30a6a01617e49910c05daa3a0d0c65053ac", size = 11962, upload-time = "2025-03-21T00:18:38.581Z" },
+]
+
+[[package]]
+name = "azure-cosmos"
+version = "4.14.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/62/7d/b503a14639caace96f6a5c801c817a4c9f7ac626d75d5bddb6757da0376b/azure_cosmos-4.14.6.tar.gz", hash = "sha256:5e7075d051c84de405a63de05752b78e3b31190e9ee04f8cf85b37a954cd7784", size = 2056812, upload-time = "2026-02-03T22:16:58.429Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c2/7c/bd50e62ac330d1c194b3473947ffbe2f99bb0269dbe41824d2534cf3251c/azure_cosmos-4.14.6-py3-none-any.whl", hash = "sha256:ea314e85eff9d31db980b7f2e88291579dedc77734bca7e9011d55d371fd0d5f", size = 395078, upload-time = "2026-02-03T22:17:01.44Z" },
+]
+
+[[package]]
+name = "azure-identity"
+version = "1.25.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "cryptography" },
+    { name = "msal" },
+    { name = "msal-extensions" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c2/3a/439a32a5e23e45f6a91f0405949dc66cfe6834aba15a430aebfc063a81e7/azure_identity-1.25.2.tar.gz", hash = "sha256:030dbaa720266c796221c6cdbd1999b408c079032c919fef725fcc348a540fe9", size = 284709, upload-time = "2026-02-11T01:55:42.323Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/9b/77/f658c76f9e9a52c784bd836aaca6fd5b9aae176f1f53273e758a2bcda695/azure_identity-1.25.2-py3-none-any.whl", hash = "sha256:1b40060553d01a72ba0d708b9a46d0f61f56312e215d8896d836653ffdc6753d", size = 191423, upload-time = "2026-02-11T01:55:44.245Z" },
+]
+
+[[package]]
+name = "azure-search-documents"
+version = "11.6.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-common" },
+    { name = "azure-core" },
+    { name = "isodate" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/cf/68/9d59a0bed5fd9581b45444e8abc3ecda97e0466ae0f03affc7cddfb9fa74/azure_search_documents-11.6.0.tar.gz", hash = "sha256:fcc807076ff82024be576ffccb0d0f3261e5c2a112a6666b86ec70bbdb2e1d64", size = 311194, upload-time = "2025-10-09T22:04:03.655Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c5/4c/d74e5c3ccc0b9ead0e400a2d70ded67554b56a5d799aaa8bf5baaacf4aea/azure_search_documents-11.6.0-py3-none-any.whl", hash = "sha256:c3eb2deaf7926844e99a881830861225ef68e8b3bc067a76019e87fc7f5586dc", size = 307935, upload-time = "2025-10-09T22:04:05.008Z" },
+]
+
+[[package]]
+name = "azure-storage-blob"
+version = "12.28.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "azure-core" },
+    { name = "cryptography" },
+    { name = "isodate" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/71/24/072ba8e27b0e2d8fec401e9969b429d4f5fc4c8d4f0f05f4661e11f7234a/azure_storage_blob-12.28.0.tar.gz", hash = "sha256:e7d98ea108258d29aa0efbfd591b2e2075fa1722a2fae8699f0b3c9de11eff41", size = 604225, upload-time = "2026-01-06T23:48:57.282Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d8/3a/6ef2047a072e54e1142718d433d50e9514c999a58f51abfff7902f3a72f8/azure_storage_blob-12.28.0-py3-none-any.whl", hash = "sha256:00fb1db28bf6a7b7ecaa48e3b1d5c83bfadacc5a678b77826081304bd87d6461", size = 431499, upload-time = "2026-01-06T23:48:58.995Z" },
+]
+
 [[package]]
 name = "bidict"
 version = "0.23.1"
@@ -191,56 +362,83 @@ wheels = [

 [[package]]
 name = "boto3"
-version = "1.42.49"
+version = "1.42.50"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "botocore" },
     { name = "jmespath" },
     { name = "s3transfer" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/20/91/105aa17e0f3a566d33e2d8a3b32a70f553b1ad500d9756c6dd63991d8354/boto3-1.42.49.tar.gz", hash = "sha256:9cd252f640567b86e92b0a8ffdd4ade9a3018ee357c724bff6a21b8c8a41be0c", size = 112877, upload-time = "2026-02-13T20:29:57.062Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/59/41/7a7280875ec000e280b0392478a5d6247bc88e7ecf2ae6ec8f4ddb35b014/boto3-1.42.50.tar.gz", hash = "sha256:38545d7e6e855fefc8a11e899ccbd6d2c9f64671d6648c2acfb1c78c1057a480", size = 112851, upload-time = "2026-02-16T20:42:09.203Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/12/b1/1fa30cd7b26617d59efbe3a4f3660a5b8b397a4623bf1e67016c4cb6dd0e/boto3-1.42.49-py3-none-any.whl", hash = "sha256:99e1df4361c3f6ff6ade65803c043ea96314826134962dd3b385433b309eb819", size = 140606, upload-time = "2026-02-13T20:29:55.366Z" },
-]
-
-[[package]]
-name = "boto3-stubs"
-version = "1.42.49"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "botocore-stubs" },
-    { name = "types-s3transfer" },
-    { name = "typing-extensions", marker = "python_full_version < '3.12'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d4/e2/fa495f8a89ac713cc8f0081e35c649d1a8fa7c3d6d8f08bd1e068ef65c1b/boto3_stubs-1.42.49.tar.gz", hash = "sha256:8edb22726cf4b733e008733f4d44adde389eb7648b0c22cf071a675fed8daaab", size = 100907, upload-time = "2026-02-13T21:07:26.348Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/47/de/302b3b56510d3ac3e78eb9b0e93914cf36d01a4fc79471c345636783c45d/boto3_stubs-1.42.49-py3-none-any.whl", hash = "sha256:ec4368a2905e4ccbba6d606cf3e0549f159962a0e5751384e8c6624116a1729a", size = 69780, upload-time = "2026-02-13T21:07:21.218Z" },
+    { url = "https://files.pythonhosted.org/packages/5e/14/bf4077d843d737bec6f4176e113182a4435a1864e2a819ca07004da8a9ac/boto3-1.42.50-py3-none-any.whl", hash = "sha256:2fdf8f5349b130d62576068a6c47b3eec368a70bc28f16d8cce17c5f7e74fc2e", size = 140604, upload-time = "2026-02-16T20:42:06.652Z" },
 ]

 [[package]]
 name = "botocore"
-version = "1.42.49"
+version = "1.42.50"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "jmespath" },
     { name = "python-dateutil" },
     { name = "urllib3" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/c5/95/c3a3765ab65073695161e7180d631428cb6e67c18d97e8897871dfe51fcc/botocore-1.42.49.tar.gz", hash = "sha256:333115a64a507697b0c450ade7e2d82bc8b4e21c0051542514532b455712bdcc", size = 14958380, upload-time = "2026-02-13T20:29:47.218Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/93/fd/e63789133b2bf044c8550cd6766ec93628b0ac18a03f2aa0b80171f0697a/botocore-1.42.50.tar.gz", hash = "sha256:de1e128e4898f4e66877bfabbbb03c61f99366f27520442539339e8a74afe3a5", size = 14958074, upload-time = "2026-02-16T20:41:58.814Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/d6/cd/7e7ceeff26889d1fd923f069381e3b2b85ff6d46c6fd1409ed8f486cc06f/botocore-1.42.49-py3-none-any.whl", hash = "sha256:1c33544f72101eed4ccf903ebb667a803e14e25b2af4e0836e4b871da1c0af37", size = 14630510, upload-time = "2026-02-13T20:29:43.086Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/b8/b02ad16c5198e652eafdd8bad76aa62ac094afabbe1241b4be1cd4075666/botocore-1.42.50-py3-none-any.whl", hash = "sha256:3ec7004009d1557a881b1d076d54b5768230849fa9ccdebfd409f0571490e691", size = 14631256, upload-time = "2026-02-16T20:41:55.004Z" },
 ]

 [[package]]
-name = "botocore-stubs"
-version = "1.42.41"
+name = "bottleneck"
+version = "1.6.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "types-awscrt" },
+    { name = "numpy" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/0c/a8/a26608ff39e3a5866c6c79eda10133490205cbddd45074190becece3ff2a/botocore_stubs-1.42.41.tar.gz", hash = "sha256:dbeac2f744df6b814ce83ec3f3777b299a015cbea57a2efc41c33b8c38265825", size = 42411, upload-time = "2026-02-03T20:46:14.479Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/32/76/cab7af7f16c0b09347f2ebe7ffda7101132f786acb767666dce43055faab/botocore_stubs-1.42.41-py3-none-any.whl", hash = "sha256:9423110fb0e391834bd2ed44ae5f879d8cb370a444703d966d30842ce2bcb5f0", size = 66759, upload-time = "2026-02-03T20:46:13.02Z" },
+sdist = { url = "https://files.pythonhosted.org/packages/14/d8/6d641573e210768816023a64966d66463f2ce9fc9945fa03290c8a18f87c/bottleneck-1.6.0.tar.gz", hash = "sha256:028d46ee4b025ad9ab4d79924113816f825f62b17b87c9e1d0d8ce144a4a0e31", size = 104311, upload-time = "2025-09-08T16:30:38.617Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/83/96/9d51012d729f97de1e75aad986f3ba50956742a40fc99cbab4c2aa896c1c/bottleneck-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef4514782afe39db2497aaea93b1c167ab7ab3bc5e3930500ef9cf11841db7", size = 100400, upload-time = "2025-09-08T16:29:44.464Z" },
+    { url = "https://files.pythonhosted.org/packages/16/f4/4fcbebcbc42376a77e395a6838575950587e5eb82edf47d103f8daa7ba22/bottleneck-1.6.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:727363f99edc6dc83d52ed28224d4cb858c07a01c336c7499c0c2e5dd4fd3e4a", size = 375920, upload-time = "2025-09-08T16:29:45.52Z" },
+    { url = "https://files.pythonhosted.org/packages/36/13/7fa8cdc41cbf2dfe0540f98e1e0caf9ffbd681b1a0fc679a91c2698adaf9/bottleneck-1.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:847671a9e392220d1dfd2ff2524b4d61ec47b2a36ea78e169d2aa357fd9d933a", size = 367922, upload-time = "2025-09-08T16:29:46.743Z" },
+    { url = "https://files.pythonhosted.org/packages/13/7d/dccfa4a2792c1bdc0efdde8267e527727e517df1ff0d4976b84e0268c2f9/bottleneck-1.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:daef2603ab7b4ec4f032bb54facf5fa92dacd3a264c2fd9677c9fc22bcb5a245", size = 361379, upload-time = "2025-09-08T16:29:48.042Z" },
+    { url = "https://files.pythonhosted.org/packages/93/42/21c0fad823b71c3a8904cbb847ad45136d25573a2d001a9cff48d3985fab/bottleneck-1.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fc7f09bda980d967f2e9f1a746eda57479f824f66de0b92b9835c431a8c922d4", size = 371911, upload-time = "2025-09-08T16:29:49.366Z" },
+    { url = "https://files.pythonhosted.org/packages/3b/b0/830ff80f8c74577d53034c494639eac7a0ffc70935c01ceadfbe77f590c2/bottleneck-1.6.0-cp311-cp311-win32.whl", hash = "sha256:1f78bad13ad190180f73cceb92d22f4101bde3d768f4647030089f704ae7cac7", size = 107831, upload-time = "2025-09-08T16:29:51.397Z" },
+    { url = "https://files.pythonhosted.org/packages/6f/42/01d4920b0aa51fba503f112c90714547609bbe17b6ecfc1c7ae1da3183df/bottleneck-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f2adef59fdb9edf2983fe3a4c07e5d1b677c43e5669f4711da2c3daad8321ad", size = 113358, upload-time = "2025-09-08T16:29:52.602Z" },
+    { url = "https://files.pythonhosted.org/packages/8d/72/7e3593a2a3dd69ec831a9981a7b1443647acb66a5aec34c1620a5f7f8498/bottleneck-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bb16a16a86a655fdbb34df672109a8a227bb5f9c9cf5bb8ae400a639bc52fa3", size = 100515, upload-time = "2025-09-08T16:29:55.141Z" },
+    { url = "https://files.pythonhosted.org/packages/b5/d4/e7bbea08f4c0f0bab819d38c1a613da5f194fba7b19aae3e2b3a27e78886/bottleneck-1.6.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0fbf5d0787af9aee6cef4db9cdd14975ce24bd02e0cc30155a51411ebe2ff35f", size = 377451, upload-time = "2025-09-08T16:29:56.718Z" },
+    { url = "https://files.pythonhosted.org/packages/fe/80/a6da430e3b1a12fd85f9fe90d3ad8fe9a527ecb046644c37b4b3f4baacfc/bottleneck-1.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d08966f4a22384862258940346a72087a6f7cebb19038fbf3a3f6690ee7fd39f", size = 368303, upload-time = "2025-09-08T16:29:57.834Z" },
+    { url = "https://files.pythonhosted.org/packages/30/11/abd30a49f3251f4538430e5f876df96f2b39dabf49e05c5836820d2c31fe/bottleneck-1.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:604f0b898b43b7bc631c564630e936a8759d2d952641c8b02f71e31dbcd9deaa", size = 361232, upload-time = "2025-09-08T16:29:59.104Z" },
+    { url = "https://files.pythonhosted.org/packages/1d/ac/1c0e09d8d92b9951f675bd42463ce76c3c3657b31c5bf53ca1f6dd9eccff/bottleneck-1.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d33720bad761e642abc18eda5f188ff2841191c9f63f9d0c052245decc0faeb9", size = 373234, upload-time = "2025-09-08T16:30:00.488Z" },
+    { url = "https://files.pythonhosted.org/packages/fb/ea/382c572ae3057ba885d484726bb63629d1f63abedf91c6cd23974eb35a9b/bottleneck-1.6.0-cp312-cp312-win32.whl", hash = "sha256:a1e5907ec2714efbe7075d9207b58c22ab6984a59102e4ecd78dced80dab8374", size = 108020, upload-time = "2025-09-08T16:30:01.773Z" },
+    { url = "https://files.pythonhosted.org/packages/48/ad/d71da675eef85ac153eef5111ca0caa924548c9591da00939bcabba8de8e/bottleneck-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:81e3822499f057a917b7d3972ebc631ac63c6bbcc79ad3542a66c4c40634e3a6", size = 113493, upload-time = "2025-09-08T16:30:02.872Z" },
+    { url = "https://files.pythonhosted.org/packages/97/1a/e117cd5ff7056126d3291deb29ac8066476e60b852555b95beb3fc9d62a0/bottleneck-1.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015de414ca016ebe56440bdf5d3d1204085080527a3c51f5b7b7a3e704fe6fd", size = 100521, upload-time = "2025-09-08T16:30:03.89Z" },
+    { url = "https://files.pythonhosted.org/packages/bd/22/05555a9752357e24caa1cd92324d1a7fdde6386aab162fcc451f8f8eedc2/bottleneck-1.6.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:456757c9525b0b12356f472e38020ed4b76b18375fd76e055f8d33fb62956f5e", size = 377719, upload-time = "2025-09-08T16:30:05.135Z" },
+    { url = "https://files.pythonhosted.org/packages/11/ee/76593af47097d9633109bed04dbcf2170707dd84313ca29f436f9234bc51/bottleneck-1.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c65254d51b6063c55f6272f175e867e2078342ae75f74be29d6612e9627b2c0", size = 368577, upload-time = "2025-09-08T16:30:06.387Z" },
+    { url = "https://files.pythonhosted.org/packages/f9/f7/4dcacaf637d2b8d89ea746c74159adda43858d47358978880614c3fa4391/bottleneck-1.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a172322895fbb79c6127474f1b0db0866895f0b804a18d5c6b841fea093927fe", size = 361441, upload-time = "2025-09-08T16:30:07.613Z" },
+    { url = "https://files.pythonhosted.org/packages/05/34/21eb1eb1c42cb7be2872d0647c292fc75768d14e1f0db66bf907b24b2464/bottleneck-1.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5e81b642eb0d5a5bf00312598d7ed142d389728b694322a118c26813f3d1fa9", size = 373416, upload-time = "2025-09-08T16:30:08.899Z" },
+    { url = "https://files.pythonhosted.org/packages/48/cb/7957ff40367a151139b5f1854616bf92e578f10804d226fbcdecfd73aead/bottleneck-1.6.0-cp313-cp313-win32.whl", hash = "sha256:543d3a89d22880cd322e44caff859af6c0489657bf9897977d1f5d3d3f77299c", size = 108029, upload-time = "2025-09-08T16:30:09.909Z" },
+    { url = "https://files.pythonhosted.org/packages/90/a8/735df4156fa5595501d5d96a6ee102f49c13d2ce9e2a287ad51806bc3ba0/bottleneck-1.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:48a44307d604ceb81e256903e5d57d3adb96a461b1d3c6a69baa2c67e823bd36", size = 113497, upload-time = "2025-09-08T16:30:10.82Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/5c/8c1260df8ade7cebc2a8af513a27082b5e36aa4a5fb762d56ea6d969d893/bottleneck-1.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:547e6715115867c4657c9ae8cc5ddac1fec8fdad66690be3a322a7488721b06b", size = 101606, upload-time = "2025-09-08T16:30:11.935Z" },
+    { url = "https://files.pythonhosted.org/packages/ce/ea/f03e2944e91ee962922c834ed21e5be6d067c8395681f5dc6c67a0a26853/bottleneck-1.6.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5e4a4a6e05b6f014c307969129e10d1a0afd18f3a2c127b085532a4a76677aef", size = 391804, upload-time = "2025-09-08T16:30:13.13Z" },
+    { url = "https://files.pythonhosted.org/packages/0b/58/2b356b8a81eb97637dccee6cf58237198dd828890e38be9afb4e5e58e38e/bottleneck-1.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2baae0d1589b4a520b2f9cf03528c0c8b20717b3f05675e212ec2200cf628f12", size = 383443, upload-time = "2025-09-08T16:30:14.318Z" },
+    { url = "https://files.pythonhosted.org/packages/55/52/cf7d09ed3736ad0d50c624787f9b580ae3206494d95cc0f4814b93eef728/bottleneck-1.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2e407139b322f01d8d5b6b2e8091b810f48a25c7fa5c678cfcdc420dfe8aea0a", size = 375458, upload-time = "2025-09-08T16:30:15.379Z" },
+    { url = "https://files.pythonhosted.org/packages/c4/e9/7c87a34a24e339860064f20fac49f6738e94f1717bc8726b9c47705601d8/bottleneck-1.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1adefb89b92aba6de9c6ea871d99bcd29d519f4fb012cc5197917813b4fc2c7f", size = 386384, upload-time = "2025-09-08T16:30:17.012Z" },
+    { url = "https://files.pythonhosted.org/packages/59/57/db51855e18a47671801180be748939b4c9422a0544849af1919116346b5f/bottleneck-1.6.0-cp313-cp313t-win32.whl", hash = "sha256:64b8690393494074923780f6abdf5f5577d844b9d9689725d1575a936e74e5f0", size = 109448, upload-time = "2025-09-08T16:30:18.076Z" },
+    { url = "https://files.pythonhosted.org/packages/bd/1e/683c090b624f13a5bf88a0be2241dc301e98b2fb72a45812a7ae6e456cc4/bottleneck-1.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:cb67247f65dcdf62af947c76c6c8b77d9f0ead442cac0edbaa17850d6da4e48d", size = 115190, upload-time = "2025-09-08T16:30:19.106Z" },
+    { url = "https://files.pythonhosted.org/packages/77/e2/eb7c08964a3f3c4719f98795ccd21807ee9dd3071a0f9ad652a5f19196ff/bottleneck-1.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:98f1d789042511a0f042b3bdcd2903e8567e956d3aa3be189cce3746daeb8550", size = 100544, upload-time = "2025-09-08T16:30:20.22Z" },
+    { url = "https://files.pythonhosted.org/packages/99/ec/c6f3be848f37689f481797ce7d9807d5f69a199d7fc0e46044f9b708c468/bottleneck-1.6.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1fad24c99e39ad7623fc2a76d37feb26bd32e4dd170885edf4dbf4bfce2199a3", size = 378315, upload-time = "2025-09-08T16:30:21.409Z" },
+    { url = "https://files.pythonhosted.org/packages/bf/8f/2d6600836e2ea8f14fcefac592dc83497e5b88d381470c958cb9cdf88706/bottleneck-1.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643e61e50a6f993debc399b495a1609a55b3bd76b057e433e4089505d9f605c7", size = 368978, upload-time = "2025-09-08T16:30:23.458Z" },
+    { url = "https://files.pythonhosted.org/packages/9b/b5/bf72b49f5040212873b985feef5050015645e0a02204b591e1d265fc522a/bottleneck-1.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa668efbe4c6b200524ea0ebd537212da9b9801287138016fdf64119d6fcf201", size = 362074, upload-time = "2025-09-08T16:30:24.71Z" },
+    { url = "https://files.pythonhosted.org/packages/1d/c8/c4891a0604eb680031390182c6e264247e3a9a8d067d654362245396fadf/bottleneck-1.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9f7dd35262e89e28fedd79d45022394b1fa1aceb61d2e747c6d6842e50546daa", size = 374019, upload-time = "2025-09-08T16:30:26.438Z" },
+    { url = "https://files.pythonhosted.org/packages/e6/2d/ed096f8d1b9147e84914045dd89bc64e3c32eee49b862d1e20d573a9ab0d/bottleneck-1.6.0-cp314-cp314-win32.whl", hash = "sha256:bd90bec3c470b7fdfafc2fbdcd7a1c55a4e57b5cdad88d40eea5bc9bab759bf1", size = 110173, upload-time = "2025-09-08T16:30:27.521Z" },
+    { url = "https://files.pythonhosted.org/packages/33/70/1414acb6ae378a15063cfb19a0a39d69d1b6baae1120a64d2b069902549b/bottleneck-1.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:b43b6d36a62ffdedc6368cf9a708e4d0a30d98656c2b5f33d88894e1bcfd6857", size = 115899, upload-time = "2025-09-08T16:30:28.524Z" },
+    { url = "https://files.pythonhosted.org/packages/4e/ed/4570b5d8c1c85ce3c54963ebc37472231ed54f0b0d8dbb5dde14303f775f/bottleneck-1.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:53296707a8e195b5dcaa804b714bd222b5e446bd93cd496008122277eb43fa87", size = 101615, upload-time = "2025-09-08T16:30:29.556Z" },
+    { url = "https://files.pythonhosted.org/packages/2d/93/c148faa07ae91f266be1f3fad1fde95aa2449e12937f3f3df2dd720b86e0/bottleneck-1.6.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6df19cc48a83efd70f6d6874332aa31c3f5ca06a98b782449064abbd564cf0e", size = 392411, upload-time = "2025-09-08T16:30:31.186Z" },
+    { url = "https://files.pythonhosted.org/packages/6e/1c/e6ad221d345a059e7efb2ad1d46a22d9fdae0486faef70555766e1123966/bottleneck-1.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96bb3a52cb3c0aadfedce3106f93ab940a49c9d35cd4ed612e031f6deb27e80f", size = 384022, upload-time = "2025-09-08T16:30:32.364Z" },
+    { url = "https://files.pythonhosted.org/packages/4f/40/5b15c01eb8c59d59bc84c94d01d3d30797c961f10ec190f53c27e05d62ab/bottleneck-1.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d1db9e831b69d5595b12e79aeb04cb02873db35576467c8dd26cdc1ee6b74581", size = 376004, upload-time = "2025-09-08T16:30:33.731Z" },
+    { url = "https://files.pythonhosted.org/packages/74/f6/cb228f5949553a5c01d1d5a3c933f0216d78540d9e0bf8dd4343bb449681/bottleneck-1.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4dd7ac619570865fcb7a0e8925df418005f076286ad2c702dd0f447231d7a055", size = 386909, upload-time = "2025-09-08T16:30:34.973Z" },
+    { url = "https://files.pythonhosted.org/packages/09/9a/425065c37a67a9120bf53290371579b83d05bf46f3212cce65d8c01d470a/bottleneck-1.6.0-cp314-cp314t-win32.whl", hash = "sha256:7fb694165df95d428fe00b98b9ea7d126ef786c4a4b7d43ae2530248396cadcb", size = 111636, upload-time = "2025-09-08T16:30:36.044Z" },
+    { url = "https://files.pythonhosted.org/packages/ad/23/c41006e42909ec5114a8961818412310aa54646d1eae0495dbff3598a095/bottleneck-1.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:174b80930ce82bd8456c67f1abb28a5975c68db49d254783ce2cb6983b4fea40", size = 117611, upload-time = "2025-09-08T16:30:37.055Z" },
 ]

 [[package]]
@@ -606,6 +804,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
 ]

+[[package]]
+name = "docstring-parser"
+version = "0.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
+]
+
 [[package]]
 name = "filelock"
 version = "3.24.2"
@@ -624,6 +831,22 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
 ]

+[[package]]
+name = "fireworks-ai"
+version = "0.15.15"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "httpx" },
+    { name = "httpx-sse" },
+    { name = "httpx-ws" },
+    { name = "pillow" },
+    { name = "pydantic" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/20/5b/7ed59473cd8420a44d11d15594b00c3395e9af726f7c5a632171b02ffdc8/fireworks_ai-0.15.15.tar.gz", hash = "sha256:d558e02df06844cb33344d33ecfb1c619a5e82d2ec4d8f51a0a45b7de5d3f4a0", size = 91475, upload-time = "2025-06-20T21:11:27.957Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5f/e7/319a4ce37bed682741bc8ebbb84b7983da3d8cd7ac069d86b52a37d79f2e/fireworks_ai-0.15.15-py3-none-any.whl", hash = "sha256:1047b8e575a536898a827b089b0022c1fab207940f9773b90fa357ebf942f5c9", size = 112831, upload-time = "2025-06-20T21:11:26.701Z" },
+]
+
 [[package]]
 name = "frozenlist"
 version = "1.8.0"
@@ -730,119 +953,162 @@ wheels = [
 ]

 [[package]]
-name = "google-ai-generativelanguage"
-version = "0.6.15"
+name = "google-api-core"
+version = "2.29.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "google-api-core", version = "2.25.2", source = { registry = "https://pypi.org/simple" }, extra = ["grpc"], marker = "python_full_version >= '3.14'" },
-    { name = "google-api-core", version = "2.29.0", source = { registry = "https://pypi.org/simple" }, extra = ["grpc"], marker = "python_full_version < '3.14'" },
     { name = "google-auth" },
+    { name = "googleapis-common-protos" },
     { name = "proto-plus" },
     { name = "protobuf" },
+    { name = "requests" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/11/d1/48fe5d7a43d278e9f6b5ada810b0a3530bbeac7ed7fcbcd366f932f05316/google_ai_generativelanguage-0.6.15.tar.gz", hash = "sha256:8f6d9dc4c12b065fe2d0289026171acea5183ebf2d0b11cefe12f3821e159ec3", size = 1375443, upload-time = "2025-01-13T21:50:47.459Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/0d/10/05572d33273292bac49c2d1785925f7bc3ff2fe50e3044cf1062c1dde32e/google_api_core-2.29.0.tar.gz", hash = "sha256:84181be0f8e6b04006df75ddfe728f24489f0af57c96a529ff7cf45bc28797f7", size = 177828, upload-time = "2026-01-08T22:21:39.269Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/7c/a3/67b8a6ff5001a1d8864922f2d6488dc2a14367ceb651bc3f09a947f2f306/google_ai_generativelanguage-0.6.15-py3-none-any.whl", hash = "sha256:5a03ef86377aa184ffef3662ca28f19eeee158733e45d7947982eb953c6ebb6c", size = 1327356, upload-time = "2025-01-13T21:50:44.174Z" },
+    { url = "https://files.pythonhosted.org/packages/77/b6/85c4d21067220b9a78cfb81f516f9725ea6befc1544ec9bd2c1acd97c324/google_api_core-2.29.0-py3-none-any.whl", hash = "sha256:d30bc60980daa36e314b5d5a3e5958b0200cb44ca8fa1be2b614e932b75a3ea9", size = 173906, upload-time = "2026-01-08T22:21:36.093Z" },
+]
+
+[package.optional-dependencies]
+grpc = [
+    { name = "grpcio" },
+    { name = "grpcio-status" },
 ]

 [[package]]
-name = "google-api-core"
-version = "2.25.2"
+name = "google-auth"
+version = "2.48.0"
 source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
-    "python_full_version >= '3.14'",
-]
 dependencies = [
-    { name = "google-auth", marker = "python_full_version >= '3.14'" },
-    { name = "googleapis-common-protos", marker = "python_full_version >= '3.14'" },
-    { name = "proto-plus", marker = "python_full_version >= '3.14'" },
-    { name = "protobuf", marker = "python_full_version >= '3.14'" },
-    { name = "requests", marker = "python_full_version >= '3.14'" },
+    { name = "cryptography" },
+    { name = "pyasn1-modules" },
+    { name = "rsa" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/09/cd/63f1557235c2440fe0577acdbc32577c5c002684c58c7f4d770a92366a24/google_api_core-2.25.2.tar.gz", hash = "sha256:1c63aa6af0d0d5e37966f157a77f9396d820fba59f9e43e9415bc3dc5baff300", size = 166266, upload-time = "2025-10-03T00:07:34.778Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/0c/41/242044323fbd746615884b1c16639749e73665b718209946ebad7ba8a813/google_auth-2.48.0.tar.gz", hash = "sha256:4f7e706b0cd3208a3d940a19a822c37a476ddba5450156c3e6624a71f7c841ce", size = 326522, upload-time = "2026-01-26T19:22:47.157Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/c8/d8/894716a5423933f5c8d2d5f04b16f052a515f78e815dab0c2c6f1fd105dc/google_api_core-2.25.2-py3-none-any.whl", hash = "sha256:e9a8f62d363dc8424a8497f4c2a47d6bcda6c16514c935629c257ab5d10210e7", size = 162489, upload-time = "2025-10-03T00:07:32.924Z" },
+    { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" },
 ]

 [package.optional-dependencies]
-grpc = [
-    { name = "grpcio", marker = "python_full_version >= '3.14'" },
-    { name = "grpcio-status", marker = "python_full_version >= '3.14'" },
+requests = [
+    { name = "requests" },
 ]

 [[package]]
-name = "google-api-core"
-version = "2.29.0"
+name = "google-cloud-aiplatform"
+version = "1.137.0"
 source = { registry = "https://pypi.org/simple" }
-resolution-markers = [
-    "python_full_version == '3.13.*'",
-    "python_full_version < '3.13'",
-]
 dependencies = [
-    { name = "google-auth", marker = "python_full_version < '3.14'" },
-    { name = "googleapis-common-protos", marker = "python_full_version < '3.14'" },
-    { name = "proto-plus", marker = "python_full_version < '3.14'" },
-    { name = "protobuf", marker = "python_full_version < '3.14'" },
-    { name = "requests", marker = "python_full_version < '3.14'" },
+    { name = "docstring-parser" },
+    { name = "google-api-core", extra = ["grpc"] },
+    { name = "google-auth" },
+    { name = "google-cloud-bigquery" },
+    { name = "google-cloud-resource-manager" },
+    { name = "google-cloud-storage" },
+    { name = "google-genai" },
+    { name = "packaging" },
+    { name = "proto-plus" },
+    { name = "protobuf" },
+    { name = "pydantic" },
+    { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/0d/10/05572d33273292bac49c2d1785925f7bc3ff2fe50e3044cf1062c1dde32e/google_api_core-2.29.0.tar.gz", hash = "sha256:84181be0f8e6b04006df75ddfe728f24489f0af57c96a529ff7cf45bc28797f7", size = 177828, upload-time = "2026-01-08T22:21:39.269Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/76/0da98f663f5c58239900fa8f99488d01439b1ca7846c9667217a3aee20b1/google_cloud_aiplatform-1.137.0.tar.gz", hash = "sha256:76e66e2c3879936e51039d8bbd82581451510b4c7a840a588daaecee893d7d1e", size = 9947045, upload-time = "2026-02-11T16:23:18.435Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/77/b6/85c4d21067220b9a78cfb81f516f9725ea6befc1544ec9bd2c1acd97c324/google_api_core-2.29.0-py3-none-any.whl", hash = "sha256:d30bc60980daa36e314b5d5a3e5958b0200cb44ca8fa1be2b614e932b75a3ea9", size = 173906, upload-time = "2026-01-08T22:21:36.093Z" },
-]
-
-[package.optional-dependencies]
-grpc = [
-    { name = "grpcio", marker = "python_full_version < '3.14'" },
-    { name = "grpcio-status", marker = "python_full_version < '3.14'" },
+    { url = "https://files.pythonhosted.org/packages/72/b5/795c410120cb350058b9328f051b57a49a897514ba1bc65677ade0f6c1be/google_cloud_aiplatform-1.137.0-py2.py3-none-any.whl", hash = "sha256:e99dd235c237cbbeb0e73b0fc4b1ca9588b4144ac243a6242b2005b339b40ce8", size = 8204286, upload-time = "2026-02-11T16:23:15.462Z" },
 ]

 [[package]]
-name = "google-api-python-client"
-version = "2.190.0"
+name = "google-cloud-bigquery"
+version = "3.40.1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "google-api-core", version = "2.25.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" },
-    { name = "google-api-core", version = "2.29.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" },
+    { name = "google-api-core", extra = ["grpc"] },
     { name = "google-auth" },
-    { name = "google-auth-httplib2" },
-    { name = "httplib2" },
-    { name = "uritemplate" },
+    { name = "google-cloud-core" },
+    { name = "google-resumable-media" },
+    { name = "packaging" },
+    { name = "python-dateutil" },
+    { name = "requests" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/e4/8d/4ab3e3516b93bb50ed7814738ea61d49cba3f72f4e331dc9518ae2731e92/google_api_python_client-2.190.0.tar.gz", hash = "sha256:5357f34552e3724d80d2604c8fa146766e0a9d6bb0afada886fafed9feafeef6", size = 14111143, upload-time = "2026-02-12T00:38:03.37Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/11/0c/153ee546c288949fcc6794d58811ab5420f3ecad5fa7f9e73f78d9512a6e/google_cloud_bigquery-3.40.1.tar.gz", hash = "sha256:75afcfb6e007238fe1deefb2182105249321145ff921784fe7b1de2b4ba24506", size = 511761, upload-time = "2026-02-12T18:44:18.958Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/07/ad/223d5f4b0b987669ffeb3eadd7e9f85ece633aa7fd3246f1e2f6238e1e05/google_api_python_client-2.190.0-py3-none-any.whl", hash = "sha256:d9b5266758f96c39b8c21d9bbfeb4e58c14dbfba3c931f7c5a8d7fdcd292dd57", size = 14682070, upload-time = "2026-02-12T00:38:00.974Z" },
+    { url = "https://files.pythonhosted.org/packages/7c/f5/081cf5b90adfe524ae0d671781b0d497a75a0f2601d075af518828e22d8f/google_cloud_bigquery-3.40.1-py3-none-any.whl", hash = "sha256:9082a6b8193aba87bed6a2c79cf1152b524c99bb7e7ac33a785e333c09eac868", size = 262018, upload-time = "2026-02-12T18:44:16.913Z" },
 ]

 [[package]]
-name = "google-auth"
-version = "2.48.0"
+name = "google-cloud-core"
+version = "2.5.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "cryptography" },
-    { name = "pyasn1-modules" },
-    { name = "rsa" },
+    { name = "google-api-core" },
+    { name = "google-auth" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/0c/41/242044323fbd746615884b1c16639749e73665b718209946ebad7ba8a813/google_auth-2.48.0.tar.gz", hash = "sha256:4f7e706b0cd3208a3d940a19a822c37a476ddba5450156c3e6624a71f7c841ce", size = 326522, upload-time = "2026-01-26T19:22:47.157Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a6/03/ef0bc99d0e0faf4fdbe67ac445e18cdaa74824fd93cd069e7bb6548cb52d/google_cloud_core-2.5.0.tar.gz", hash = "sha256:7c1b7ef5c92311717bd05301aa1a91ffbc565673d3b0b4163a52d8413a186963", size = 36027, upload-time = "2025-10-29T23:17:39.513Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" },
+    { url = "https://files.pythonhosted.org/packages/89/20/bfa472e327c8edee00f04beecc80baeddd2ab33ee0e86fd7654da49d45e9/google_cloud_core-2.5.0-py3-none-any.whl", hash = "sha256:67d977b41ae6c7211ee830c7912e41003ea8194bff15ae7d72fd6f51e57acabc", size = 29469, upload-time = "2025-10-29T23:17:38.548Z" },
 ]

-[package.optional-dependencies]
-requests = [
-    { name = "requests" },
+[[package]]
+name = "google-cloud-resource-manager"
+version = "1.16.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "google-api-core", extra = ["grpc"] },
+    { name = "google-auth" },
+    { name = "grpc-google-iam-v1" },
+    { name = "grpcio" },
+    { name = "proto-plus" },
+    { name = "protobuf" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4e/7f/db00b2820475793a52958dc55fe9ec2eb8e863546e05fcece9b921f86ebe/google_cloud_resource_manager-1.16.0.tar.gz", hash = "sha256:cc938f87cc36c2672f062b1e541650629e0d954c405a4dac35ceedee70c267c3", size = 459840, upload-time = "2026-01-15T13:04:07.726Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/94/ff/4b28bcc791d9d7e4ac8fea00fbd90ccb236afda56746a3b4564d2ae45df3/google_cloud_resource_manager-1.16.0-py3-none-any.whl", hash = "sha256:fb9a2ad2b5053c508e1c407ac31abfd1a22e91c32876c1892830724195819a28", size = 400218, upload-time = "2026-01-15T13:02:47.378Z" },
 ]

 [[package]]
-name = "google-auth-httplib2"
-version = "0.3.0"
+name = "google-cloud-storage"
+version = "3.9.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
+    { name = "google-api-core" },
     { name = "google-auth" },
-    { name = "httplib2" },
+    { name = "google-cloud-core" },
+    { name = "google-crc32c" },
+    { name = "google-resumable-media" },
+    { name = "requests" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/d5/ad/c1f2b1175096a8d04cf202ad5ea6065f108d26be6fc7215876bde4a7981d/google_auth_httplib2-0.3.0.tar.gz", hash = "sha256:177898a0175252480d5ed916aeea183c2df87c1f9c26705d74ae6b951c268b0b", size = 11134, upload-time = "2025-12-15T22:13:51.825Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f7/b1/4f0798e88285b50dfc60ed3a7de071def538b358db2da468c2e0deecbb40/google_cloud_storage-3.9.0.tar.gz", hash = "sha256:f2d8ca7db2f652be757e92573b2196e10fbc09649b5c016f8b422ad593c641cc", size = 17298544, upload-time = "2026-02-02T13:36:34.119Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/99/d5/3c97526c8796d3caf5f4b3bed2b05e8a7102326f00a334e7a438237f3b22/google_auth_httplib2-0.3.0-py3-none-any.whl", hash = "sha256:426167e5df066e3f5a0fc7ea18768c08e7296046594ce4c8c409c2457dd1f776", size = 9529, upload-time = "2025-12-15T22:13:51.048Z" },
+    { url = "https://files.pythonhosted.org/packages/46/0b/816a6ae3c9fd096937d2e5f9670558908811d57d59ddf69dd4b83b326fd1/google_cloud_storage-3.9.0-py3-none-any.whl", hash = "sha256:2dce75a9e8b3387078cbbdad44757d410ecdb916101f8ba308abf202b6968066", size = 321324, upload-time = "2026-02-02T13:36:32.271Z" },
+]
+
+[[package]]
+name = "google-crc32c"
+version = "1.8.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/03/41/4b9c02f99e4c5fb477122cd5437403b552873f014616ac1d19ac8221a58d/google_crc32c-1.8.0.tar.gz", hash = "sha256:a428e25fb7691024de47fecfbff7ff957214da51eddded0da0ae0e0f03a2cf79", size = 14192, upload-time = "2025-12-16T00:35:25.142Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5d/ef/21ccfaab3d5078d41efe8612e0ed0bfc9ce22475de074162a91a25f7980d/google_crc32c-1.8.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:014a7e68d623e9a4222d663931febc3033c5c7c9730785727de2a81f87d5bab8", size = 31298, upload-time = "2025-12-16T00:20:32.241Z" },
+    { url = "https://files.pythonhosted.org/packages/c5/b8/f8413d3f4b676136e965e764ceedec904fe38ae8de0cdc52a12d8eb1096e/google_crc32c-1.8.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:86cfc00fe45a0ac7359e5214a1704e51a99e757d0272554874f419f79838c5f7", size = 30872, upload-time = "2025-12-16T00:33:58.785Z" },
+    { url = "https://files.pythonhosted.org/packages/f6/fd/33aa4ec62b290477181c55bb1c9302c9698c58c0ce9a6ab4874abc8b0d60/google_crc32c-1.8.0-cp311-cp311-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:19b40d637a54cb71e0829179f6cb41835f0fbd9e8eb60552152a8b52c36cbe15", size = 33243, upload-time = "2025-12-16T00:40:21.46Z" },
+    { url = "https://files.pythonhosted.org/packages/71/03/4820b3bd99c9653d1a5210cb32f9ba4da9681619b4d35b6a052432df4773/google_crc32c-1.8.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:17446feb05abddc187e5441a45971b8394ea4c1b6efd88ab0af393fd9e0a156a", size = 33608, upload-time = "2025-12-16T00:40:22.204Z" },
+    { url = "https://files.pythonhosted.org/packages/7c/43/acf61476a11437bf9733fb2f70599b1ced11ec7ed9ea760fdd9a77d0c619/google_crc32c-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:71734788a88f551fbd6a97be9668a0020698e07b2bf5b3aa26a36c10cdfb27b2", size = 34439, upload-time = "2025-12-16T00:35:20.458Z" },
+    { url = "https://files.pythonhosted.org/packages/e9/5f/7307325b1198b59324c0fa9807cafb551afb65e831699f2ce211ad5c8240/google_crc32c-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:4b8286b659c1335172e39563ab0a768b8015e88e08329fa5321f774275fc3113", size = 31300, upload-time = "2025-12-16T00:21:56.723Z" },
+    { url = "https://files.pythonhosted.org/packages/21/8e/58c0d5d86e2220e6a37befe7e6a94dd2f6006044b1a33edf1ff6d9f7e319/google_crc32c-1.8.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:2a3dc3318507de089c5384cc74d54318401410f82aa65b2d9cdde9d297aca7cb", size = 30867, upload-time = "2025-12-16T00:38:31.302Z" },
+    { url = "https://files.pythonhosted.org/packages/ce/a9/a780cc66f86335a6019f557a8aaca8fbb970728f0efd2430d15ff1beae0e/google_crc32c-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14f87e04d613dfa218d6135e81b78272c3b904e2a7053b841481b38a7d901411", size = 33364, upload-time = "2025-12-16T00:40:22.96Z" },
+    { url = "https://files.pythonhosted.org/packages/21/3f/3457ea803db0198c9aaca2dd373750972ce28a26f00544b6b85088811939/google_crc32c-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cb5c869c2923d56cb0c8e6bcdd73c009c36ae39b652dbe46a05eb4ef0ad01454", size = 33740, upload-time = "2025-12-16T00:40:23.96Z" },
+    { url = "https://files.pythonhosted.org/packages/df/c0/87c2073e0c72515bb8733d4eef7b21548e8d189f094b5dad20b0ecaf64f6/google_crc32c-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:3cc0c8912038065eafa603b238abf252e204accab2a704c63b9e14837a854962", size = 34437, upload-time = "2025-12-16T00:35:21.395Z" },
+    { url = "https://files.pythonhosted.org/packages/d1/db/000f15b41724589b0e7bc24bc7a8967898d8d3bc8caf64c513d91ef1f6c0/google_crc32c-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:3ebb04528e83b2634857f43f9bb8ef5b2bbe7f10f140daeb01b58f972d04736b", size = 31297, upload-time = "2025-12-16T00:23:20.709Z" },
+    { url = "https://files.pythonhosted.org/packages/d7/0d/8ebed0c39c53a7e838e2a486da8abb0e52de135f1b376ae2f0b160eb4c1a/google_crc32c-1.8.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:450dc98429d3e33ed2926fc99ee81001928d63460f8538f21a5d6060912a8e27", size = 30867, upload-time = "2025-12-16T00:43:14.628Z" },
+    { url = "https://files.pythonhosted.org/packages/ce/42/b468aec74a0354b34c8cbf748db20d6e350a68a2b0912e128cabee49806c/google_crc32c-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3b9776774b24ba76831609ffbabce8cdf6fa2bd5e9df37b594221c7e333a81fa", size = 33344, upload-time = "2025-12-16T00:40:24.742Z" },
+    { url = "https://files.pythonhosted.org/packages/1c/e8/b33784d6fc77fb5062a8a7854e43e1e618b87d5ddf610a88025e4de6226e/google_crc32c-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:89c17d53d75562edfff86679244830599ee0a48efc216200691de8b02ab6b2b8", size = 33694, upload-time = "2025-12-16T00:40:25.505Z" },
+    { url = "https://files.pythonhosted.org/packages/92/b1/d3cbd4d988afb3d8e4db94ca953df429ed6db7282ed0e700d25e6c7bfc8d/google_crc32c-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:57a50a9035b75643996fbf224d6661e386c7162d1dfdab9bc4ca790947d1007f", size = 34435, upload-time = "2025-12-16T00:35:22.107Z" },
+    { url = "https://files.pythonhosted.org/packages/21/88/8ecf3c2b864a490b9e7010c84fd203ec8cf3b280651106a3a74dd1b0ca72/google_crc32c-1.8.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:e6584b12cb06796d285d09e33f63309a09368b9d806a551d8036a4207ea43697", size = 31301, upload-time = "2025-12-16T00:24:48.527Z" },
+    { url = "https://files.pythonhosted.org/packages/36/c6/f7ff6c11f5ca215d9f43d3629163727a272eabc356e5c9b2853df2bfe965/google_crc32c-1.8.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:f4b51844ef67d6cf2e9425983274da75f18b1597bb2c998e1c0a0e8d46f8f651", size = 30868, upload-time = "2025-12-16T00:48:12.163Z" },
+    { url = "https://files.pythonhosted.org/packages/56/15/c25671c7aad70f8179d858c55a6ae8404902abe0cdcf32a29d581792b491/google_crc32c-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b0d1a7afc6e8e4635564ba8aa5c0548e3173e41b6384d7711a9123165f582de2", size = 33381, upload-time = "2025-12-16T00:40:26.268Z" },
+    { url = "https://files.pythonhosted.org/packages/42/fa/f50f51260d7b0ef5d4898af122d8a7ec5a84e2984f676f746445f783705f/google_crc32c-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8b3f68782f3cbd1bce027e48768293072813469af6a61a86f6bb4977a4380f21", size = 33734, upload-time = "2025-12-16T00:40:27.028Z" },
+    { url = "https://files.pythonhosted.org/packages/08/a5/7b059810934a09fb3ccb657e0843813c1fee1183d3bc2c8041800374aa2c/google_crc32c-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:d511b3153e7011a27ab6ee6bb3a5404a55b994dc1a7322c0b87b29606d9790e2", size = 34878, upload-time = "2025-12-16T00:35:23.142Z" },
+    { url = "https://files.pythonhosted.org/packages/52/c5/c171e4d8c44fec1422d801a6d2e5d7ddabd733eeda505c79730ee9607f07/google_crc32c-1.8.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:87fa445064e7db928226b2e6f0d5304ab4cd0339e664a4e9a25029f384d9bb93", size = 28615, upload-time = "2025-12-16T00:40:29.298Z" },
+    { url = "https://files.pythonhosted.org/packages/9c/97/7d75fe37a7a6ed171a2cf17117177e7aab7e6e0d115858741b41e9dd4254/google_crc32c-1.8.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f639065ea2042d5c034bf258a9f085eaa7af0cd250667c0635a3118e8f92c69c", size = 28800, upload-time = "2025-12-16T00:40:30.322Z" },
 ]

 [[package]]
@@ -867,22 +1133,15 @@ wheels = [
 ]

 [[package]]
-name = "google-generativeai"
-version = "0.8.6"
+name = "google-resumable-media"
+version = "2.8.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "google-ai-generativelanguage" },
-    { name = "google-api-core", version = "2.25.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" },
-    { name = "google-api-core", version = "2.29.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" },
-    { name = "google-api-python-client" },
-    { name = "google-auth" },
-    { name = "protobuf" },
-    { name = "pydantic" },
-    { name = "tqdm" },
-    { name = "typing-extensions" },
+    { name = "google-crc32c" },
 ]
+sdist = { url = "https://files.pythonhosted.org/packages/64/d7/520b62a35b23038ff005e334dba3ffc75fcf583bee26723f1fd8fd4b6919/google_resumable_media-2.8.0.tar.gz", hash = "sha256:f1157ed8b46994d60a1bc432544db62352043113684d4e030ee02e77ebe9a1ae", size = 2163265, upload-time = "2025-11-17T15:38:06.659Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/97/0f/ef33b5bb71437966590c6297104c81051feae95d54b11ece08533ef937d3/google_generativeai-0.8.6-py3-none-any.whl", hash = "sha256:37a0eaaa95e5bbf888828e20a4a1b2c196cc9527d194706e58a68ff388aeb0fa", size = 155098, upload-time = "2025-12-16T17:53:58.61Z" },
+    { url = "https://files.pythonhosted.org/packages/1f/0b/93afde9cfe012260e9fe1522f35c9b72d6ee222f316586b1f23ecf44d518/google_resumable_media-2.8.0-py3-none-any.whl", hash = "sha256:dd14a116af303845a8d932ddae161a26e86cc229645bc98b39f026f9b1717582", size = 81340, upload-time = "2025-11-17T15:38:05.594Z" },
 ]

 [[package]]
@@ -897,6 +1156,11 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" },
 ]

+[package.optional-dependencies]
+grpc = [
+    { name = "grpcio" },
+]
+
 [[package]]
 name = "graphtty"
 version = "0.1.8"
@@ -906,6 +1170,20 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/7d/a9/66d01580a4a92b576c056e9967d552a28ed836540eaf73b436474c514bc2/graphtty-0.1.8-py3-none-any.whl", hash = "sha256:4e19e6d66b9ef79e2715377163f61a6542b5b9ee00d50406b80a40d0ba094f67", size = 25474, upload-time = "2026-02-15T12:47:16.377Z" },
 ]

+[[package]]
+name = "grpc-google-iam-v1"
+version = "0.14.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "googleapis-common-protos", extra = ["grpc"] },
+    { name = "grpcio" },
+    { name = "protobuf" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/1e/1011451679a983f2f5c6771a1682542ecb027776762ad031fd0d7129164b/grpc_google_iam_v1-0.14.3.tar.gz", hash = "sha256:879ac4ef33136c5491a6300e27575a9ec760f6cdf9a2518798c1b8977a5dc389", size = 23745, upload-time = "2025-10-15T21:14:53.318Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/4a/bd/330a1bbdb1afe0b96311249e699b6dc9cfc17916394fd4503ac5aca2514b/grpc_google_iam_v1-0.14.3-py3-none-any.whl", hash = "sha256:7a7f697e017a067206a3dfef44e4c634a34d3dee135fe7d7a4613fe3e59217e6", size = 32690, upload-time = "2025-10-15T21:14:51.72Z" },
+]
+
 [[package]]
 name = "grpcio"
 version = "1.78.0"
@@ -959,16 +1237,16 @@ wheels = [

 [[package]]
 name = "grpcio-status"
-version = "1.71.2"
+version = "1.78.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "googleapis-common-protos" },
     { name = "grpcio" },
     { name = "protobuf" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/fd/d1/b6e9877fedae3add1afdeae1f89d1927d296da9cf977eca0eb08fb8a460e/grpcio_status-1.71.2.tar.gz", hash = "sha256:c7a97e176df71cdc2c179cd1847d7fc86cca5832ad12e9798d7fed6b7a1aab50", size = 13677, upload-time = "2025-06-28T04:24:05.426Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/8a/cd/89ce482a931b543b92cdd9b2888805518c4620e0094409acb8c81dd4610a/grpcio_status-1.78.0.tar.gz", hash = "sha256:a34cfd28101bfea84b5aa0f936b4b423019e9213882907166af6b3bddc59e189", size = 13808, upload-time = "2026-02-06T10:01:48.034Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/67/58/317b0134129b556a93a3b0afe00ee675b5657f0155509e22fcb853bafe2d/grpcio_status-1.71.2-py3-none-any.whl", hash = "sha256:803c98cb6a8b7dc6dbb785b1111aed739f241ab5e9da0bba96888aa74704cfd3", size = 14424, upload-time = "2025-06-28T04:23:42.136Z" },
+    { url = "https://files.pythonhosted.org/packages/83/8a/1241ec22c41028bddd4a052ae9369267b4475265ad0ce7140974548dc3fa/grpcio_status-1.78.0-py3-none-any.whl", hash = "sha256:b492b693d4bf27b47a6c32590701724f1d3b9444b36491878fb71f6208857f34", size = 14523, upload-time = "2026-02-06T10:01:32.584Z" },
 ]

 [[package]]
@@ -993,18 +1271,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
 ]

-[[package]]
-name = "httplib2"
-version = "0.31.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "pyparsing" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c1/1f/e86365613582c027dda5ddb64e1010e57a3d53e99ab8a72093fa13d565ec/httplib2-0.31.2.tar.gz", hash = "sha256:385e0869d7397484f4eab426197a4c020b606edd43372492337c0b4010ae5d24", size = 250800, upload-time = "2026-01-23T11:04:44.165Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/2f/90/fd509079dfcab01102c0fdd87f3a9506894bc70afcf9e9785ef6b2b3aff6/httplib2-0.31.2-py3-none-any.whl", hash = "sha256:dbf0c2fa3862acf3c55c078ea9c0bc4481d7dc5117cae71be9514912cf9f8349", size = 91099, upload-time = "2026-01-23T11:04:42.78Z" },
-]
-
 [[package]]
 name = "httpx"
 version = "0.28.1"
@@ -1029,6 +1295,21 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" },
 ]

+[[package]]
+name = "httpx-ws"
+version = "0.8.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anyio" },
+    { name = "httpcore" },
+    { name = "httpx" },
+    { name = "wsproto" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4a/32/6f7198f55d94063ea84487a31cdd3e149d2702dc0804fc5de06ed12ef2c2/httpx_ws-0.8.2.tar.gz", hash = "sha256:ba0d4aa76e1c8a27bd5e88984ecdcdc28f7bf30b40cb0989a4c1438d07fa52c7", size = 105734, upload-time = "2025-11-07T12:57:36.566Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/29/cd/2008972ddc4c2139b9813d8a097e53dcc74b2a16a85b4069294457954232/httpx_ws-0.8.2-py3-none-any.whl", hash = "sha256:f8898ddb84cbf98c562e8e796675bc68c215fa1d453d54a7fcd935aca8198cc8", size = 15404, upload-time = "2025-11-07T12:57:35.176Z" },
+]
+
 [[package]]
 name = "identify"
 version = "2.6.16"
@@ -1068,6 +1349,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
 ]

+[[package]]
+name = "isodate"
+version = "0.7.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/54/4d/e940025e2ce31a8ce1202635910747e5a87cc3a6a6bb2d00973375014749/isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6", size = 29705, upload-time = "2024-10-08T23:04:11.5Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320, upload-time = "2024-10-08T23:04:09.501Z" },
+]
+
 [[package]]
 name = "jiter"
 version = "0.13.0"
@@ -1248,6 +1538,20 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/7c/06/c3394327f815fade875724c0f6cff529777c96a1e17fea066deb997f8cf5/langchain-1.2.10-py3-none-any.whl", hash = "sha256:e07a377204451fffaed88276b8193e894893b1003e25c5bca6539288ccca3698", size = 111738, upload-time = "2026-02-10T14:56:47.985Z" },
 ]

+[[package]]
+name = "langchain-anthropic"
+version = "1.3.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anthropic" },
+    { name = "langchain-core" },
+    { name = "pydantic" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/58/48/cf217b3836099220737ff1f8fd07a554993080dfc9c0b4dd4af16ccb0604/langchain_anthropic-1.3.3.tar.gz", hash = "sha256:37198413c9bde5a9e9829f13c7b9ed4870d7085e7fba9fd803ef4d98ef8ea220", size = 686916, upload-time = "2026-02-10T21:02:28.924Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/8c/f1/cf56d47964b6fe080cdc54c3e32bc05e560927d549b2634b39d14aaf6e05/langchain_anthropic-1.3.3-py3-none-any.whl", hash = "sha256:8008ce5fb680268681673e09f93a9ac08eba9e304477101e5e138f06b5cd8710", size = 46831, upload-time = "2026-02-10T21:02:27.386Z" },
+]
+
 [[package]]
 name = "langchain-aws"
 version = "1.2.5"
@@ -1263,6 +1567,28 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/f0/33/059753e0265a868de5aec9280e8025753c98191d2d140e55e01fbe75fc0a/langchain_aws-1.2.5-py3-none-any.whl", hash = "sha256:2c04a43d609046f8fb31ab44347a333a9f8b1a73bbcad383db99219a365ca287", size = 165761, upload-time = "2026-02-11T18:33:11.271Z" },
 ]

+[[package]]
+name = "langchain-azure-ai"
+version = "1.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "aiohttp" },
+    { name = "azure-ai-agents" },
+    { name = "azure-ai-inference", extra = ["opentelemetry"] },
+    { name = "azure-ai-projects" },
+    { name = "azure-core" },
+    { name = "azure-cosmos" },
+    { name = "azure-identity" },
+    { name = "azure-search-documents" },
+    { name = "langchain" },
+    { name = "langchain-openai" },
+    { name = "numpy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/12/d9/605679c840874fa7f038dca973a465c0f7825f59897d8a7d4be57991f293/langchain_azure_ai-1.0.0.tar.gz", hash = "sha256:98426c4a00c41a6d5a3d75fc2c41f9468fda2a73f79a7648236c9d554bc382c1", size = 83591, upload-time = "2025-10-22T16:39:40.548Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/6d/22/d23485a86bacddae9c6fe5c7c24aca0308c771075583db1dbb075278f73c/langchain_azure_ai-1.0.0-py3-none-any.whl", hash = 
"sha256:8055e62df0b64de6350a2c035df7c226fc3e6e813ecf7d7f34bf4d498226875e", size = 100255, upload-time = "2025-10-22T16:39:39.503Z" }, +] + [[package]] name = "langchain-core" version = "1.2.13" @@ -1282,6 +1608,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/12/ab/60fd69e5d55f67d422baefddaaca523c42cd7510ab6aeb17db6ae57fb107/langchain_core-1.2.13-py3-none-any.whl", hash = "sha256:b31823e28d3eff1e237096d0bd3bf80c6f9624eb471a9496dbfbd427779f8d82", size = 500485, upload-time = "2026-02-15T07:45:55.422Z" }, ] +[[package]] +name = "langchain-fireworks" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "fireworks-ai" }, + { name = "langchain-core" }, + { name = "openai" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/6d/544b1175f535e7d831db963f8107c7940944e76609d5a0ae3751b0afb8f7/langchain_fireworks-1.1.0.tar.gz", hash = "sha256:04db3b2fd390f113e2026375d9a60fdc46e456920d4c98b3d7a6f70ce16bf7ed", size = 169062, upload-time = "2025-11-24T14:09:18.484Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/4a/7e60c509f8a2224c7c77e5d0ad4a7c15ea64cdb31f6bd709299a65dc1316/langchain_fireworks-1.1.0-py3-none-any.whl", hash = "sha256:fa063d4413a2eb46bf7ec6d7473f6ac16c8015f61bbbc7aa4912ebf82d0236fe", size = 18331, upload-time = "2025-11-24T14:09:17.47Z" }, +] + [[package]] name = "langchain-google-genai" version = "4.2.0" @@ -1297,6 +1639,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/22/51/39942c0083139652494bb354dddf0ed397703a4882302f7b48aeca531c96/langchain_google_genai-4.2.0-py3-none-any.whl", hash = "sha256:856041aaafceff65a4ef0d5acf5731f2db95229ff041132af011aec51e8279d9", size = 66452, upload-time = "2026-01-13T20:41:16.296Z" }, ] +[[package]] +name = "langchain-google-vertexai" +version = "3.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bottleneck" }, + { name = "google-cloud-aiplatform" }, + { name = "google-cloud-storage" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "langchain-core" }, + { name = "numexpr" }, + { name = "pyarrow" }, + { name = "pydantic" }, + { name = "validators" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/5f/55a5b568104c32e265970d4217083d76252c2140f532f382bb42f35886a8/langchain_google_vertexai-3.2.1.tar.gz", hash = "sha256:8913e8aa7ca300eb7d9b8681ba2487dad787debe2511a903a249dc03709720d2", size = 360287, upload-time = "2026-01-05T21:47:58.287Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/90/e2b1493df6ad06a7c1194f6d8238ddbd7fedc0cfe1e32d1c3980903c9d05/langchain_google_vertexai-3.2.1-py3-none-any.whl", hash = "sha256:57a25680290060c896fb740bdaafa987e0518b599f793b3dfb2d07a7aec97bd8", size = 103650, upload-time = "2026-01-05T21:47:57.136Z" }, +] + [[package]] name = "langchain-mcp-adapters" version = "0.2.1" @@ -1552,6 +1915,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ff/f0/fc3475ef7732d080c6a555e4ce12e606c699e82e220eb4bcafb9e801fc07/mockito-1.5.5-py3-none-any.whl", hash = "sha256:422a6ce2666e3c32d756547b98ee67e83a339f457e132476689643015845fc36", size = 30456, upload-time = "2025-11-17T11:28:40.371Z" }, ] +[[package]] +name = "msal" +version = "1.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "requests" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/cf/0e/c857c46d653e104019a84f22d4494f2119b4fe9f896c92b4b864b3b045cc/msal-1.34.0.tar.gz", hash = "sha256:76ba83b716ea5a6d75b0279c0ac353a0e05b820ca1f6682c0eb7f45190c43c2f", size = 153961, upload-time = "2025-09-22T23:05:48.989Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/dc/18d48843499e278538890dc709e9ee3dea8375f8be8e82682851df1b48b5/msal-1.34.0-py3-none-any.whl", hash = "sha256:f669b1644e4950115da7a176441b0e13ec2975c29528d8b9e81316023676d6e1", size = 116987, upload-time = "2025-09-22T23:05:47.294Z" }, +] + +[[package]] +name = "msal-extensions" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "msal" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/99/5d239b6156eddf761a636bded1118414d161bd6b7b37a9335549ed159396/msal_extensions-1.3.1.tar.gz", hash = "sha256:c5b0fd10f65ef62b5f1d62f4251d51cbcaf003fcedae8c91b040a488614be1a4", size = 23315, upload-time = "2025-03-14T23:51:03.902Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" }, +] + [[package]] name = "msgpack" version = "1.1.2" @@ -1779,6 +2168,65 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] +[[package]] +name = "numexpr" +version = "2.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/2f/fdba158c9dbe5caca9c3eca3eaffffb251f2fb8674bf8e2d0aed5f38d319/numexpr-2.14.1.tar.gz", hash = "sha256:4be00b1086c7b7a5c32e31558122b7b80243fe098579b170967da83f3152b48b", size = 119400, upload-time = "2025-10-13T16:17:27.351Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/a3/67999bdd1ed1f938d38f3fedd4969632f2f197b090e50505f7cc1fa82510/numexpr-2.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2d03fcb4644a12f70a14d74006f72662824da5b6128bf1bcd10cc3ed80e64c34", size = 163195, upload-time = "2025-10-13T16:16:31.212Z" }, + { url = "https://files.pythonhosted.org/packages/25/95/d64f680ea1fc56d165457287e0851d6708800f9fcea346fc1b9957942ee6/numexpr-2.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2773ee1133f77009a1fc2f34fe236f3d9823779f5f75450e183137d49f00499f", size = 152088, upload-time = "2025-10-13T16:16:33.186Z" }, + { url = "https://files.pythonhosted.org/packages/0e/7f/3bae417cb13ae08afd86d08bb0301c32440fe0cae4e6262b530e0819aeda/numexpr-2.14.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebe4980f9494b9f94d10d2e526edc29e72516698d3bf95670ba79415492212a4", size = 451126, upload-time = "2025-10-13T16:13:22.248Z" }, + { url = "https://files.pythonhosted.org/packages/4c/1a/edbe839109518364ac0bd9e918cf874c755bb2c128040e920f198c494263/numexpr-2.14.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a381e5e919a745c9503bcefffc1c7f98c972c04ec58fc8e999ed1a929e01ba6", size = 442012, upload-time = "2025-10-13T16:14:51.416Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/b1/be4ce99bff769a5003baddac103f34681997b31d4640d5a75c0e8ed59c78/numexpr-2.14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d08856cfc1b440eb1caaa60515235369654321995dd68eb9377577392020f6cb", size = 1415975, upload-time = "2025-10-13T16:13:26.088Z" }, + { url = "https://files.pythonhosted.org/packages/e7/33/b33b8fdc032a05d9ebb44a51bfcd4b92c178a2572cd3e6c1b03d8a4b45b2/numexpr-2.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03130afa04edf83a7b590d207444f05a00363c9b9ea5d81c0f53b1ea13fad55a", size = 1464683, upload-time = "2025-10-13T16:14:58.87Z" }, + { url = "https://files.pythonhosted.org/packages/d0/b2/ddcf0ac6cf0a1d605e5aecd4281507fd79a9628a67896795ab2e975de5df/numexpr-2.14.1-cp311-cp311-win32.whl", hash = "sha256:db78fa0c9fcbaded3ae7453faf060bd7a18b0dc10299d7fcd02d9362be1213ed", size = 166838, upload-time = "2025-10-13T16:17:06.765Z" }, + { url = "https://files.pythonhosted.org/packages/64/72/4ca9bd97b2eb6dce9f5e70a3b6acec1a93e1fb9b079cb4cba2cdfbbf295d/numexpr-2.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:e9b2f957798c67a2428be96b04bce85439bed05efe78eb78e4c2ca43737578e7", size = 160069, upload-time = "2025-10-13T16:17:08.752Z" }, + { url = "https://files.pythonhosted.org/packages/9d/20/c473fc04a371f5e2f8c5749e04505c13e7a8ede27c09e9f099b2ad6f43d6/numexpr-2.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ebae0ab18c799b0e6b8c5a8d11e1fa3848eb4011271d99848b297468a39430", size = 162790, upload-time = "2025-10-13T16:16:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/45/93/b6760dd1904c2a498e5f43d1bb436f59383c3ddea3815f1461dfaa259373/numexpr-2.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47041f2f7b9e69498fb311af672ba914a60e6e6d804011caacb17d66f639e659", size = 152196, upload-time = "2025-10-13T16:16:36.593Z" }, + { url = "https://files.pythonhosted.org/packages/72/94/cc921e35593b820521e464cbbeaf8212bbdb07f16dc79fe283168df38195/numexpr-2.14.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d686dfb2c1382d9e6e0ee0b7647f943c1886dba3adbf606c625479f35f1956c1", size = 452468, upload-time = "2025-10-13T16:13:29.531Z" }, + { url = "https://files.pythonhosted.org/packages/d9/43/560e9ba23c02c904b5934496486d061bcb14cd3ebba2e3cf0e2dccb6c22b/numexpr-2.14.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eee6d4fbbbc368e6cdd0772734d6249128d957b3b8ad47a100789009f4de7083", size = 443631, upload-time = "2025-10-13T16:15:02.473Z" }, + { url = "https://files.pythonhosted.org/packages/7b/6c/78f83b6219f61c2c22d71ab6e6c2d4e5d7381334c6c29b77204e59edb039/numexpr-2.14.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3a2839efa25f3c8d4133252ea7342d8f81226c7c4dda81f97a57e090b9d87a48", size = 1417670, upload-time = "2025-10-13T16:13:33.464Z" }, + { url = "https://files.pythonhosted.org/packages/0e/bb/1ccc9dcaf46281568ce769888bf16294c40e98a5158e4b16c241de31d0d3/numexpr-2.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9f9137f1351b310436662b5dc6f4082a245efa8950c3b0d9008028df92fefb9b", size = 1466212, upload-time = "2025-10-13T16:15:12.828Z" }, + { url = "https://files.pythonhosted.org/packages/31/9f/203d82b9e39dadd91d64bca55b3c8ca432e981b822468dcef41a4418626b/numexpr-2.14.1-cp312-cp312-win32.whl", hash = "sha256:36f8d5c1bd1355df93b43d766790f9046cccfc1e32b7c6163f75bcde682cda07", size = 166996, upload-time = "2025-10-13T16:17:10.369Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/67/ffe750b5452eb66de788c34e7d21ec6d886abb4d7c43ad1dc88ceb3d998f/numexpr-2.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:fdd886f4b7dbaf167633ee396478f0d0aa58ea2f9e7ccc3c6431019623e8d68f", size = 160187, upload-time = "2025-10-13T16:17:11.974Z" }, + { url = "https://files.pythonhosted.org/packages/73/b4/9f6d637fd79df42be1be29ee7ba1f050fab63b7182cb922a0e08adc12320/numexpr-2.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:09078ba73cffe94745abfbcc2d81ab8b4b4e9d7bfbbde6cac2ee5dbf38eee222", size = 162794, upload-time = "2025-10-13T16:16:38.291Z" }, + { url = "https://files.pythonhosted.org/packages/35/ae/d58558d8043de0c49f385ea2fa789e3cfe4d436c96be80200c5292f45f15/numexpr-2.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dce0b5a0447baa7b44bc218ec2d7dcd175b8eee6083605293349c0c1d9b82fb6", size = 152203, upload-time = "2025-10-13T16:16:39.907Z" }, + { url = "https://files.pythonhosted.org/packages/13/65/72b065f9c75baf8f474fd5d2b768350935989d4917db1c6c75b866d4067c/numexpr-2.14.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:06855053de7a3a8425429bd996e8ae3c50b57637ad3e757e0fa0602a7874be30", size = 455860, upload-time = "2025-10-13T16:13:35.811Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f9/c9457652dfe28e2eb898372da2fe786c6db81af9540c0f853ee04a0699cc/numexpr-2.14.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f9366d23a2e991fd5a8b5e61a17558f028ba86158a4552f8f239b005cdf83c", size = 446574, upload-time = "2025-10-13T16:15:17.367Z" }, + { url = "https://files.pythonhosted.org/packages/b6/99/8d3879c4d67d3db5560cf2de65ce1778b80b75f6fa415eb5c3e7bd37ba27/numexpr-2.14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c5f1b1605695778896534dfc6e130d54a65cd52be7ed2cd0cfee3981fd676bf5", size = 1417306, upload-time = "2025-10-13T16:13:42.813Z" }, + { url = "https://files.pythonhosted.org/packages/ea/05/6bddac9f18598ba94281e27a6943093f7d0976544b0cb5d92272c64719bd/numexpr-2.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a4ba71db47ea99c659d88ee6233fa77b6dc83392f1d324e0c90ddf617ae3f421", size = 1466145, upload-time = "2025-10-13T16:15:27.464Z" }, + { url = "https://files.pythonhosted.org/packages/24/5d/cbeb67aca0c5a76ead13df7e8bd8dd5e0d49145f90da697ba1d9f07005b0/numexpr-2.14.1-cp313-cp313-win32.whl", hash = "sha256:638dce8320f4a1483d5ca4fda69f60a70ed7e66be6e68bc23fb9f1a6b78a9e3b", size = 166996, upload-time = "2025-10-13T16:17:13.803Z" }, + { url = "https://files.pythonhosted.org/packages/cc/23/9281bceaeb282cead95f0aa5f7f222ffc895670ea689cc1398355f6e3001/numexpr-2.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fdcd4735121658a313f878fd31136d1bfc6a5b913219e7274e9fca9f8dac3bb", size = 160189, upload-time = "2025-10-13T16:17:15.417Z" }, + { url = "https://files.pythonhosted.org/packages/f3/76/7aac965fd93a56803cbe502aee2adcad667253ae34b0badf6c5af7908b6c/numexpr-2.14.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:557887ad7f5d3c2a40fd7310e50597045a68e66b20a77b3f44d7bc7608523b4b", size = 163524, upload-time = "2025-10-13T16:16:42.213Z" }, + { url = "https://files.pythonhosted.org/packages/58/65/79d592d5e63fbfab3b59a60c386853d9186a44a3fa3c87ba26bdc25b6195/numexpr-2.14.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:af111c8fe6fc55d15e4c7cab11920fc50740d913636d486545b080192cd0ad73", size = 152919, upload-time = "2025-10-13T16:16:44.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/78/3c8335f713d4aeb99fa758d7c62f0be1482d4947ce5b508e2052bb7aeee9/numexpr-2.14.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:33265294376e7e2ae4d264d75b798a915d2acf37b9dd2b9405e8b04f84d05cfc", size = 465972, upload-time = "2025-10-13T16:13:45.061Z" }, + { url = "https://files.pythonhosted.org/packages/35/81/9ee5f69b811e8f18746c12d6f71848617684edd3161927f95eee7a305631/numexpr-2.14.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:83647d846d3eeeb9a9255311236135286728b398d0d41d35dedb532dca807fe9", size = 456953, upload-time = "2025-10-13T16:15:31.186Z" }, + { url = "https://files.pythonhosted.org/packages/6d/39/9b8bc6e294d85cbb54a634e47b833e9f3276a8bdf7ce92aa808718a0212d/numexpr-2.14.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6e575fd3ad41ddf3355d0c7ef6bd0168619dc1779a98fe46693cad5e95d25e6e", size = 1426199, upload-time = "2025-10-13T16:13:48.231Z" }, + { url = "https://files.pythonhosted.org/packages/1e/ce/0d4fcd31ab49319740d934fba1734d7dad13aa485532ca754e555ca16c8b/numexpr-2.14.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:67ea4771029ce818573b1998f5ca416bd255156feea017841b86176a938f7d19", size = 1474214, upload-time = "2025-10-13T16:15:38.893Z" }, + { url = "https://files.pythonhosted.org/packages/b7/47/b2a93cbdb3ba4e009728ad1b9ef1550e2655ea2c86958ebaf03b9615f275/numexpr-2.14.1-cp313-cp313t-win32.whl", hash = "sha256:15015d47d3d1487072d58c0e7682ef2eb608321e14099c39d52e2dd689483611", size = 167676, upload-time = "2025-10-13T16:17:17.351Z" }, + { url = "https://files.pythonhosted.org/packages/86/99/ee3accc589ed032eea68e12172515ed96a5568534c213ad109e1f4411df1/numexpr-2.14.1-cp313-cp313t-win_amd64.whl", hash = "sha256:94c711f6d8f17dfb4606842b403699603aa591ab9f6bf23038b488ea9cfb0f09", size = 161096, upload-time = "2025-10-13T16:17:19.174Z" }, + { url = "https://files.pythonhosted.org/packages/ac/36/9db78dfbfdfa1f8bf0872993f1a334cdd8fca5a5b6567e47dcb128bcb7c2/numexpr-2.14.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ede79f7ff06629f599081de644546ce7324f1581c09b0ac174da88a470d39c21", size = 162848, upload-time = "2025-10-13T16:16:46.216Z" }, + { url = "https://files.pythonhosted.org/packages/13/c1/a5c78ae637402c5550e2e0ba175275d2515d432ec28af0cdc23c9b476e65/numexpr-2.14.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2eac7a5a2f70b3768c67056445d1ceb4ecd9b853c8eda9563823b551aeaa5082", size = 152270, upload-time = "2025-10-13T16:16:47.92Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ed/aabd8678077848dd9a751c5558c2057839f5a09e2a176d8dfcd0850ee00e/numexpr-2.14.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5aedf38d4c0c19d3cecfe0334c3f4099fb496f54c146223d30fa930084bc8574", size = 455918, upload-time = "2025-10-13T16:13:50.338Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/3db65117f02cdefb0e5e4c440daf1c30beb45051b7f47aded25b7f4f2f34/numexpr-2.14.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:439ec4d57b853792ebe5456e3160312281c3a7071ecac5532ded3278ede614de", size = 446512, upload-time = "2025-10-13T16:15:42.313Z" }, + { url = "https://files.pythonhosted.org/packages/9a/fb/7ceb9ee55b5f67e4a3e4d73d5af4c7e37e3c9f37f54bee90361b64b17e3f/numexpr-2.14.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e23b87f744e04e302d82ac5e2189ae20a533566aec76a46885376e20b0645bf8", size = 1417845, upload-time = "2025-10-13T16:13:53.836Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/2d/9b5764d0eafbbb2889288f80de773791358acf6fad1a55767538d8b79599/numexpr-2.14.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:44f84e0e5af219dbb62a081606156420815890e041b87252fbcea5df55214c4c", size = 1466211, upload-time = "2025-10-13T16:15:48.985Z" }, + { url = "https://files.pythonhosted.org/packages/5d/21/204db708eccd71aa8bc55bcad55bc0fc6c5a4e01ad78e14ee5714a749386/numexpr-2.14.1-cp314-cp314-win32.whl", hash = "sha256:1f1a5e817c534539351aa75d26088e9e1e0ef1b3a6ab484047618a652ccc4fc3", size = 168835, upload-time = "2025-10-13T16:17:20.82Z" }, + { url = "https://files.pythonhosted.org/packages/4f/3e/d83e9401a1c3449a124f7d4b3fb44084798e0d30f7c11e60712d9b94cf11/numexpr-2.14.1-cp314-cp314-win_amd64.whl", hash = "sha256:587c41509bc373dfb1fe6086ba55a73147297247bedb6d588cda69169fc412f2", size = 162608, upload-time = "2025-10-13T16:17:22.228Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d6/ec947806bb57836d6379a8c8a253c2aeaa602b12fef2336bfd2462bb4ed5/numexpr-2.14.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ec368819502b64f190c3f71be14a304780b5935c42aae5bf22c27cc2cbba70b5", size = 163525, upload-time = "2025-10-13T16:16:50.133Z" }, + { url = "https://files.pythonhosted.org/packages/0d/77/048f30dcf661a3d52963a88c29b52b6d5ce996d38e9313a56a922451c1e0/numexpr-2.14.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7e87f6d203ac57239de32261c941e9748f9309cbc0da6295eabd0c438b920d3a", size = 152917, upload-time = "2025-10-13T16:16:52.055Z" }, + { url = "https://files.pythonhosted.org/packages/9e/d3/956a13e628d722d649fbf2fded615134a308c082e122a48bad0e90a99ce9/numexpr-2.14.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dd72d8c2a165fe45ea7650b16eb8cc1792a94a722022006bb97c86fe51fd2091", size = 466242, upload-time = "2025-10-13T16:13:55.795Z" }, + { url = "https://files.pythonhosted.org/packages/d6/dd/abe848678d82486940892f2cacf39e82eec790e8930d4d713d3f9191063b/numexpr-2.14.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70d80fcb418a54ca208e9a38e58ddc425c07f66485176b261d9a67c7f2864f73", size = 457149, upload-time = "2025-10-13T16:15:52.036Z" }, + { url = "https://files.pythonhosted.org/packages/fd/bb/797b583b5fb9da5700a5708ca6eb4f889c94d81abb28de4d642c0f4b3258/numexpr-2.14.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:edea2f20c2040df8b54ee8ca8ebda63de9545b2112872466118e9df4d0ae99f3", size = 1426493, upload-time = "2025-10-13T16:13:59.244Z" }, + { url = "https://files.pythonhosted.org/packages/77/c4/0519ab028fdc35e3e7ee700def7f2b4631b175cd9e1202bd7966c1695c33/numexpr-2.14.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:790447be6879a6c51b9545f79612d24c9ea0a41d537a84e15e6a8ddef0b6268e", size = 1474413, upload-time = "2025-10-13T16:15:59.211Z" }, + { url = "https://files.pythonhosted.org/packages/d4/4a/33044878c8f4a75213cfe9c11d4c02058bb710a7a063fe14f362e8de1077/numexpr-2.14.1-cp314-cp314t-win32.whl", hash = "sha256:538961096c2300ea44240209181e31fae82759d26b51713b589332b9f2a4117e", size = 169502, upload-time = "2025-10-13T16:17:23.829Z" }, + { url = "https://files.pythonhosted.org/packages/41/a2/5a1a2c72528b429337f49911b18c302ecd36eeab00f409147e1aa4ae4519/numexpr-2.14.1-cp314-cp314t-win_amd64.whl", hash = "sha256:a40b350cd45b4446076fa11843fa32bbe07024747aeddf6d467290bf9011b392", size = 163589, upload-time = "2025-10-13T16:17:25.696Z" }, +] + [[package]] name = "numpy" version = "2.4.2" @@ -2116,6 +2564,93 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, ] +[[package]] +name = "pillow" +version = "12.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" }, + { url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" }, + { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" }, + { url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" }, + { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = 
"2026-02-11T04:20:43.184Z" }, + { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" }, + { url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" }, + { url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" }, + { url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" }, + { url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" }, + { url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" }, + { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" }, + { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" }, + { url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" }, + { url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" }, + { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" }, + { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" }, + { url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" }, + { url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" }, + { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" }, + { url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" }, + { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" }, + { url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" }, + { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" }, + { url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" }, + { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" }, + { url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" }, + { url = 
"https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" }, + { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" }, + { url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" }, + { url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" }, + { url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" }, + { url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" }, + { url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" }, + { url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" }, + { url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" }, + { url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" }, + { url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" }, + { url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" }, + { url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" }, + { url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" }, + { url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" }, + { url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" }, + { url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" }, + { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" }, + { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" }, + { url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" }, + { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" }, + { url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" }, +] + [[package]] name = "platformdirs" version = "4.9.2" @@ -2272,16 +2807,67 @@ wheels = [ [[package]] name = "protobuf" -version = "5.29.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7e/57/394a763c103e0edf87f0938dafcd918d53b4c011dfc5c8ae80f3b0452dbb/protobuf-5.29.6.tar.gz", hash = "sha256:da9ee6a5424b6b30fd5e45c5ea663aef540ca95f9ad99d1e887e819cdf9b8723", size = 425623, upload-time = "2026-02-04T22:54:40.584Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/88/9ee58ff7863c479d6f8346686d4636dd4c415b0cbeed7a6a7d0617639c2a/protobuf-5.29.6-cp310-abi3-win32.whl", hash = "sha256:62e8a3114992c7c647bce37dcc93647575fc52d50e48de30c6fcb28a6a291eb1", size = 423357, upload-time = "2026-02-04T22:54:25.805Z" }, - { url = "https://files.pythonhosted.org/packages/1c/66/2dc736a4d576847134fb6d80bd995c569b13cdc7b815d669050bf0ce2d2c/protobuf-5.29.6-cp310-abi3-win_amd64.whl", hash = "sha256:7e6ad413275be172f67fdee0f43484b6de5a904cc1c3ea9804cb6fe2ff366eda", size = 435175, upload-time = "2026-02-04T22:54:28.592Z" }, - { url = "https://files.pythonhosted.org/packages/06/db/49b05966fd208ae3f44dcd33837b6243b4915c57561d730a43f881f24dea/protobuf-5.29.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:b5a169e664b4057183a34bdc424540e86eea47560f3c123a0d64de4e137f9269", size = 418619, upload-time = "2026-02-04T22:54:30.266Z" }, - { url = "https://files.pythonhosted.org/packages/b7/d7/48cbf6b0c3c39761e47a99cb483405f0fde2be22cf00d71ef316ce52b458/protobuf-5.29.6-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:a8866b2cff111f0f863c1b3b9e7572dc7eaea23a7fae27f6fc613304046483e6", size = 320284, upload-time = "2026-02-04T22:54:31.782Z" }, - { url = "https://files.pythonhosted.org/packages/e3/dd/cadd6ec43069247d91f6345fa7a0d2858bef6af366dbd7ba8f05d2c77d3b/protobuf-5.29.6-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:e3387f44798ac1106af0233c04fb8abf543772ff241169946f698b3a9a3d3ab9", size = 320478, upload-time = "2026-02-04T22:54:32.909Z" }, - { url = "https://files.pythonhosted.org/packages/5a/cb/e3065b447186cb70aa65acc70c86baf482d82bf75625bf5a2c4f6919c6a3/protobuf-5.29.6-py3-none-any.whl", hash = 
"sha256:6b9edb641441b2da9fa8f428760fc136a49cf97a52076010cf22a2ff73438a86", size = 173126, upload-time = "2026-02-04T22:54:39.462Z" }, +version = "6.33.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/25/7c72c307aafc96fa87062aa6291d9f7c94836e43214d43722e86037aac02/protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c", size = 444465, upload-time = "2026-01-29T21:51:33.494Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/79/af92d0a8369732b027e6d6084251dd8e782c685c72da161bd4a2e00fbabb/protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b", size = 425769, upload-time = "2026-01-29T21:51:21.751Z" }, + { url = "https://files.pythonhosted.org/packages/55/75/bb9bc917d10e9ee13dee8607eb9ab963b7cf8be607c46e7862c748aa2af7/protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c", size = 437118, upload-time = "2026-01-29T21:51:24.022Z" }, + { url = "https://files.pythonhosted.org/packages/a2/6b/e48dfc1191bc5b52950246275bf4089773e91cb5ba3592621723cdddca62/protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5", size = 427766, upload-time = "2026-01-29T21:51:25.413Z" }, + { url = "https://files.pythonhosted.org/packages/4e/b1/c79468184310de09d75095ed1314b839eb2f72df71097db9d1404a1b2717/protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190", size = 324638, upload-time = "2026-01-29T21:51:26.423Z" }, + { url = "https://files.pythonhosted.org/packages/c5/f5/65d838092fd01c44d16037953fd4c2cc851e783de9b8f02b27ec4ffd906f/protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd", size = 339411, upload-time = "2026-01-29T21:51:27.446Z" }, + { url = "https://files.pythonhosted.org/packages/9b/53/a9443aa3ca9ba8724fdfa02dd1887c1bcd8e89556b715cfbacca6b63dbec/protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0", size = 323465, upload-time = "2026-01-29T21:51:28.925Z" }, + { url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" }, +] + +[[package]] +name = "pyarrow" +version = "22.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/53/04a7fdc63e6056116c9ddc8b43bc28c12cdd181b85cbeadb79278475f3ae/pyarrow-22.0.0.tar.gz", hash = "sha256:3d600dc583260d845c7d8a6db540339dd883081925da2bd1c5cb808f720b3cd9", size = 1151151, upload-time = "2025-10-24T12:30:00.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/b7/18f611a8cdc43417f9394a3ccd3eace2f32183c08b9eddc3d17681819f37/pyarrow-22.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:3e294c5eadfb93d78b0763e859a0c16d4051fc1c5231ae8956d61cb0b5666f5a", size = 34272022, upload-time = "2025-10-24T10:04:28.973Z" }, + { url = "https://files.pythonhosted.org/packages/26/5c/f259e2526c67eb4b9e511741b19870a02363a47a35edbebc55c3178db22d/pyarrow-22.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = 
"sha256:69763ab2445f632d90b504a815a2a033f74332997052b721002298ed6de40f2e", size = 35995834, upload-time = "2025-10-24T10:04:35.467Z" }, + { url = "https://files.pythonhosted.org/packages/50/8d/281f0f9b9376d4b7f146913b26fac0aa2829cd1ee7e997f53a27411bbb92/pyarrow-22.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:b41f37cabfe2463232684de44bad753d6be08a7a072f6a83447eeaf0e4d2a215", size = 45030348, upload-time = "2025-10-24T10:04:43.366Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e5/53c0a1c428f0976bf22f513d79c73000926cb00b9c138d8e02daf2102e18/pyarrow-22.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:35ad0f0378c9359b3f297299c3309778bb03b8612f987399a0333a560b43862d", size = 47699480, upload-time = "2025-10-24T10:04:51.486Z" }, + { url = "https://files.pythonhosted.org/packages/95/e1/9dbe4c465c3365959d183e6345d0a8d1dc5b02ca3f8db4760b3bc834cf25/pyarrow-22.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8382ad21458075c2e66a82a29d650f963ce51c7708c7c0ff313a8c206c4fd5e8", size = 48011148, upload-time = "2025-10-24T10:04:59.585Z" }, + { url = "https://files.pythonhosted.org/packages/c5/b4/7caf5d21930061444c3cf4fa7535c82faf5263e22ce43af7c2759ceb5b8b/pyarrow-22.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1a812a5b727bc09c3d7ea072c4eebf657c2f7066155506ba31ebf4792f88f016", size = 50276964, upload-time = "2025-10-24T10:05:08.175Z" }, + { url = "https://files.pythonhosted.org/packages/ae/f3/cec89bd99fa3abf826f14d4e53d3d11340ce6f6af4d14bdcd54cd83b6576/pyarrow-22.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ec5d40dd494882704fb876c16fa7261a69791e784ae34e6b5992e977bd2e238c", size = 28106517, upload-time = "2025-10-24T10:05:14.314Z" }, + { url = "https://files.pythonhosted.org/packages/af/63/ba23862d69652f85b615ca14ad14f3bcfc5bf1b99ef3f0cd04ff93fdad5a/pyarrow-22.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bea79263d55c24a32b0d79c00a1c58bb2ee5f0757ed95656b01c0fb310c5af3d", size = 34211578, upload-time = "2025-10-24T10:05:21.583Z" }, + { url = "https://files.pythonhosted.org/packages/b1/d0/f9ad86fe809efd2bcc8be32032fa72e8b0d112b01ae56a053006376c5930/pyarrow-22.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:12fe549c9b10ac98c91cf791d2945e878875d95508e1a5d14091a7aaa66d9cf8", size = 35989906, upload-time = "2025-10-24T10:05:29.485Z" }, + { url = "https://files.pythonhosted.org/packages/b4/a8/f910afcb14630e64d673f15904ec27dd31f1e009b77033c365c84e8c1e1d/pyarrow-22.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:334f900ff08ce0423407af97e6c26ad5d4e3b0763645559ece6fbf3747d6a8f5", size = 45021677, upload-time = "2025-10-24T10:05:38.274Z" }, + { url = "https://files.pythonhosted.org/packages/13/95/aec81f781c75cd10554dc17a25849c720d54feafb6f7847690478dcf5ef8/pyarrow-22.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c6c791b09c57ed76a18b03f2631753a4960eefbbca80f846da8baefc6491fcfe", size = 47726315, upload-time = "2025-10-24T10:05:47.314Z" }, + { url = "https://files.pythonhosted.org/packages/bb/d4/74ac9f7a54cfde12ee42734ea25d5a3c9a45db78f9def949307a92720d37/pyarrow-22.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c3200cb41cdbc65156e5f8c908d739b0dfed57e890329413da2748d1a2cd1a4e", size = 47990906, upload-time = "2025-10-24T10:05:58.254Z" }, + { url = "https://files.pythonhosted.org/packages/2e/71/fedf2499bf7a95062eafc989ace56572f3343432570e1c54e6599d5b88da/pyarrow-22.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ac93252226cf288753d8b46280f4edf3433bf9508b6977f8dd8526b521a1bbb9", size = 50306783, 
upload-time = "2025-10-24T10:06:08.08Z" }, + { url = "https://files.pythonhosted.org/packages/68/ed/b202abd5a5b78f519722f3d29063dda03c114711093c1995a33b8e2e0f4b/pyarrow-22.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:44729980b6c50a5f2bfcc2668d36c569ce17f8b17bccaf470c4313dcbbf13c9d", size = 27972883, upload-time = "2025-10-24T10:06:14.204Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d6/d0fac16a2963002fc22c8fa75180a838737203d558f0ed3b564c4a54eef5/pyarrow-22.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e6e95176209257803a8b3d0394f21604e796dadb643d2f7ca21b66c9c0b30c9a", size = 34204629, upload-time = "2025-10-24T10:06:20.274Z" }, + { url = "https://files.pythonhosted.org/packages/c6/9c/1d6357347fbae062ad3f17082f9ebc29cc733321e892c0d2085f42a2212b/pyarrow-22.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:001ea83a58024818826a9e3f89bf9310a114f7e26dfe404a4c32686f97bd7901", size = 35985783, upload-time = "2025-10-24T10:06:27.301Z" }, + { url = "https://files.pythonhosted.org/packages/ff/c0/782344c2ce58afbea010150df07e3a2f5fdad299cd631697ae7bd3bac6e3/pyarrow-22.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:ce20fe000754f477c8a9125543f1936ea5b8867c5406757c224d745ed033e691", size = 45020999, upload-time = "2025-10-24T10:06:35.387Z" }, + { url = "https://files.pythonhosted.org/packages/1b/8b/5362443737a5307a7b67c1017c42cd104213189b4970bf607e05faf9c525/pyarrow-22.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e0a15757fccb38c410947df156f9749ae4a3c89b2393741a50521f39a8cf202a", size = 47724601, upload-time = "2025-10-24T10:06:43.551Z" }, + { url = "https://files.pythonhosted.org/packages/69/4d/76e567a4fc2e190ee6072967cb4672b7d9249ac59ae65af2d7e3047afa3b/pyarrow-22.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cedb9dd9358e4ea1d9bce3665ce0797f6adf97ff142c8e25b46ba9cdd508e9b6", size = 48001050, upload-time = "2025-10-24T10:06:52.284Z" }, + { url = "https://files.pythonhosted.org/packages/01/5e/5653f0535d2a1aef8223cee9d92944cb6bccfee5cf1cd3f462d7cb022790/pyarrow-22.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:252be4a05f9d9185bb8c18e83764ebcfea7185076c07a7a662253af3a8c07941", size = 50307877, upload-time = "2025-10-24T10:07:02.405Z" }, + { url = "https://files.pythonhosted.org/packages/2d/f8/1d0bd75bf9328a3b826e24a16e5517cd7f9fbf8d34a3184a4566ef5a7f29/pyarrow-22.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:a4893d31e5ef780b6edcaf63122df0f8d321088bb0dee4c8c06eccb1ca28d145", size = 27977099, upload-time = "2025-10-24T10:08:07.259Z" }, + { url = "https://files.pythonhosted.org/packages/90/81/db56870c997805bf2b0f6eeeb2d68458bf4654652dccdcf1bf7a42d80903/pyarrow-22.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f7fe3dbe871294ba70d789be16b6e7e52b418311e166e0e3cba9522f0f437fb1", size = 34336685, upload-time = "2025-10-24T10:07:11.47Z" }, + { url = "https://files.pythonhosted.org/packages/1c/98/0727947f199aba8a120f47dfc229eeb05df15bcd7a6f1b669e9f882afc58/pyarrow-22.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ba95112d15fd4f1105fb2402c4eab9068f0554435e9b7085924bcfaac2cc306f", size = 36032158, upload-time = "2025-10-24T10:07:18.626Z" }, + { url = "https://files.pythonhosted.org/packages/96/b4/9babdef9c01720a0785945c7cf550e4acd0ebcd7bdd2e6f0aa7981fa85e2/pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c064e28361c05d72eed8e744c9605cbd6d2bb7481a511c74071fd9b24bc65d7d", size = 44892060, upload-time = "2025-10-24T10:07:26.002Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/ca/2f8804edd6279f78a37062d813de3f16f29183874447ef6d1aadbb4efa0f/pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:6f9762274496c244d951c819348afbcf212714902742225f649cf02823a6a10f", size = 47504395, upload-time = "2025-10-24T10:07:34.09Z" }, + { url = "https://files.pythonhosted.org/packages/b9/f0/77aa5198fd3943682b2e4faaf179a674f0edea0d55d326d83cb2277d9363/pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a9d9ffdc2ab696f6b15b4d1f7cec6658e1d788124418cb30030afbae31c64746", size = 48066216, upload-time = "2025-10-24T10:07:43.528Z" }, + { url = "https://files.pythonhosted.org/packages/79/87/a1937b6e78b2aff18b706d738c9e46ade5bfcf11b294e39c87706a0089ac/pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ec1a15968a9d80da01e1d30349b2b0d7cc91e96588ee324ce1b5228175043e95", size = 50288552, upload-time = "2025-10-24T10:07:53.519Z" }, + { url = "https://files.pythonhosted.org/packages/60/ae/b5a5811e11f25788ccfdaa8f26b6791c9807119dffcf80514505527c384c/pyarrow-22.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bba208d9c7decf9961998edf5c65e3ea4355d5818dd6cd0f6809bec1afb951cc", size = 28262504, upload-time = "2025-10-24T10:08:00.932Z" }, + { url = "https://files.pythonhosted.org/packages/bd/b0/0fa4d28a8edb42b0a7144edd20befd04173ac79819547216f8a9f36f9e50/pyarrow-22.0.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:9bddc2cade6561f6820d4cd73f99a0243532ad506bc510a75a5a65a522b2d74d", size = 34224062, upload-time = "2025-10-24T10:08:14.101Z" }, + { url = "https://files.pythonhosted.org/packages/0f/a8/7a719076b3c1be0acef56a07220c586f25cd24de0e3f3102b438d18ae5df/pyarrow-22.0.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:e70ff90c64419709d38c8932ea9fe1cc98415c4f87ea8da81719e43f02534bc9", size = 35990057, upload-time = "2025-10-24T10:08:21.842Z" }, + { url = "https://files.pythonhosted.org/packages/89/3c/359ed54c93b47fb6fe30ed16cdf50e3f0e8b9ccfb11b86218c3619ae50a8/pyarrow-22.0.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:92843c305330aa94a36e706c16209cd4df274693e777ca47112617db7d0ef3d7", size = 45068002, upload-time = "2025-10-24T10:08:29.034Z" }, + { url = "https://files.pythonhosted.org/packages/55/fc/4945896cc8638536ee787a3bd6ce7cec8ec9acf452d78ec39ab328efa0a1/pyarrow-22.0.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:6dda1ddac033d27421c20d7a7943eec60be44e0db4e079f33cc5af3b8280ccde", size = 47737765, upload-time = "2025-10-24T10:08:38.559Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5e/7cb7edeb2abfaa1f79b5d5eb89432356155c8426f75d3753cbcb9592c0fd/pyarrow-22.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:84378110dd9a6c06323b41b56e129c504d157d1a983ce8f5443761eb5256bafc", size = 48048139, upload-time = "2025-10-24T10:08:46.784Z" }, + { url = "https://files.pythonhosted.org/packages/88/c6/546baa7c48185f5e9d6e59277c4b19f30f48c94d9dd938c2a80d4d6b067c/pyarrow-22.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:854794239111d2b88b40b6ef92aa478024d1e5074f364033e73e21e3f76b25e0", size = 50314244, upload-time = "2025-10-24T10:08:55.771Z" }, + { url = "https://files.pythonhosted.org/packages/3c/79/755ff2d145aafec8d347bf18f95e4e81c00127f06d080135dfc86aea417c/pyarrow-22.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:b883fe6fd85adad7932b3271c38ac289c65b7337c2c132e9569f9d3940620730", size = 28757501, upload-time = "2025-10-24T10:09:59.891Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/d2/237d75ac28ced3147912954e3c1a174df43a95f4f88e467809118a8165e0/pyarrow-22.0.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7a820d8ae11facf32585507c11f04e3f38343c1e784c9b5a8b1da5c930547fe2", size = 34355506, upload-time = "2025-10-24T10:09:02.953Z" }, + { url = "https://files.pythonhosted.org/packages/1e/2c/733dfffe6d3069740f98e57ff81007809067d68626c5faef293434d11bd6/pyarrow-22.0.0-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:c6ec3675d98915bf1ec8b3c7986422682f7232ea76cad276f4c8abd5b7319b70", size = 36047312, upload-time = "2025-10-24T10:09:10.334Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2b/29d6e3782dc1f299727462c1543af357a0f2c1d3c160ce199950d9ca51eb/pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:3e739edd001b04f654b166204fc7a9de896cf6007eaff33409ee9e50ceaff754", size = 45081609, upload-time = "2025-10-24T10:09:18.61Z" }, + { url = "https://files.pythonhosted.org/packages/8d/42/aa9355ecc05997915af1b7b947a7f66c02dcaa927f3203b87871c114ba10/pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7388ac685cab5b279a41dfe0a6ccd99e4dbf322edfb63e02fc0443bf24134e91", size = 47703663, upload-time = "2025-10-24T10:09:27.369Z" }, + { url = "https://files.pythonhosted.org/packages/ee/62/45abedde480168e83a1de005b7b7043fd553321c1e8c5a9a114425f64842/pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f633074f36dbc33d5c05b5dc75371e5660f1dbf9c8b1d95669def05e5425989c", size = 48066543, upload-time = "2025-10-24T10:09:34.908Z" }, + { url = "https://files.pythonhosted.org/packages/84/e9/7878940a5b072e4f3bf998770acafeae13b267f9893af5f6d4ab3904b67e/pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4c19236ae2402a8663a2c8f21f1870a03cc57f0bef7e4b6eb3238cc82944de80", size = 50288838, upload-time = "2025-10-24T10:09:44.394Z" }, + { url = "https://files.pythonhosted.org/packages/7b/03/f335d6c52b4a4761bcc83499789a1e2e16d9d201a58c327a9b5cc9a41bd9/pyarrow-22.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0c34fe18094686194f204a3b1787a27456897d8a2d62caf84b61e8dfbc0252ae", size = 29185594, upload-time = "2025-10-24T10:09:53.111Z" }, ] [[package]] @@ -2475,15 +3061,6 @@ crypto = [ { name = "cryptography" }, ] -[[package]] -name = "pyparsing" -version = "3.3.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/91/9c6ee907786a473bf81c5f53cf703ba0957b23ab84c264080fb5a450416f/pyparsing-3.3.2.tar.gz", hash = "sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc", size = 6851574, upload-time = "2026-01-21T03:57:59.36Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/10/bd/c038d7cc38edc1aa5bf91ab8068b63d4308c66c4c8bb3cbba7dfbc049f9c/pyparsing-3.3.2-py3-none-any.whl", hash = "sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d", size = 122781, upload-time = "2026-01-21T03:57:55.912Z" }, -] - [[package]] name = "pysignalr" version = "1.3.0" @@ -3239,24 +3816,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/97/56608b2249fe206a67cd573bc93cd9896e1efb9e98bce9c163bcdc704b88/truststore-0.10.4-py3-none-any.whl", hash = "sha256:adaeaecf1cbb5f4de3b1959b42d41f6fab57b2b1666adb59e89cb0b53361d981", size = 18660, upload-time = "2025-08-12T18:49:01.46Z" }, ] -[[package]] -name = "types-awscrt" -version = "0.31.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/18/24/5497a611f32cbaf4b9e1af35f56463e8f02e198ec513b68cb59a63f5a446/types_awscrt-0.31.2.tar.gz", hash = "sha256:dc79705acd24094656b8105b8d799d7e273c8eac37c69137df580cd84beb54f6", size = 18190, upload-time = "2026-02-16T02:33:53.135Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/3d/21a2212b5fcef9e8e9f368403885dc567b7d31e50b2ce393efad3cd83572/types_awscrt-0.31.2-py3-none-any.whl", hash = "sha256:3d6a29c1cca894b191be408f4d985a8e3a14d919785652dd3fa4ee558143e4bf", size = 43340, upload-time = "2026-02-16T02:33:52.109Z" }, -] - -[[package]] -name = "types-s3transfer" -version = "0.16.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/64/42689150509eb3e6e82b33ee3d89045de1592488842ddf23c56957786d05/types_s3transfer-0.16.0.tar.gz", hash = "sha256:b4636472024c5e2b62278c5b759661efeb52a81851cde5f092f24100b1ecb443", size = 13557, upload-time = "2025-12-08T08:13:09.928Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/98/27/e88220fe6274eccd3bdf95d9382918716d312f6f6cef6a46332d1ee2feff/types_s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:1c0cd111ecf6e21437cb410f5cddb631bfb2263b77ad973e79b9c6d0cb24e0ef", size = 19247, upload-time = "2025-12-08T08:13:08.426Z" }, -] - [[package]] name = "typing-extensions" version = "4.15.0" @@ -3280,7 +3839,7 @@ wheels = [ [[package]] name = "uipath" -version = "2.8.35" +version = "2.8.37" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "applicationinsights" }, @@ -3302,9 +3861,9 @@ dependencies = [ { name = "uipath-core" }, { name = "uipath-runtime" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/67/51/0f568f028c11166ff6e2d670c48a6f029d11657ef54c2e5b816be73bd2da/uipath-2.8.35.tar.gz", hash = "sha256:eb85141a995258474fe0d40cf3a99f3e89e3337ba4610ec7a6f30f48076d2794", size = 4363767, upload-time = "2026-02-16T15:22:24.547Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/26/5b5a3f670725132ddf8bb5ad51c458709d6bbd3b2e9401d35e4c07278d3d/uipath-2.8.37.tar.gz", hash = "sha256:6a56852aac9b1d5397638e4e1ea69a209f42aa3fe4c256f5d33806a06a36257d", size = 4365439, upload-time = "2026-02-17T06:22:15.781Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/36/b7/86f7578fe3f560debaa480b61a583c0ce6e4600e8ae709cd7790e46cfd0f/uipath-2.8.35-py3-none-any.whl", hash = "sha256:9458fda9b33bd201d7e45cc83fd2075dae227ea1d3e76eef4b5f90254bd0d577", size = 482307, upload-time = "2026-02-16T15:22:22.439Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/ddb39484f4830d57b5bc5cbbff27606989c2eb64189836c456f3f42d0e45/uipath-2.8.37-py3-none-any.whl", hash = "sha256:07305463a9b0923050a4d24c1eefc13d995932039a879d7b60c27014ca36d79b", size = 482737, upload-time = "2026-02-17T06:22:13.792Z" }, ] [[package]] @@ -3323,7 +3882,7 @@ wheels = [ [[package]] name = "uipath-langchain" -version = "0.5.66" +version = "1.0.0" source = { editable = "." 
} dependencies = [ { name = "httpx" }, @@ -3332,7 +3891,6 @@ dependencies = [ { name = "langchain" }, { name = "langchain-core" }, { name = "langchain-mcp-adapters" }, - { name = "langchain-openai" }, { name = "langgraph" }, { name = "langgraph-checkpoint-sqlite" }, { name = "mcp" }, @@ -3340,17 +3898,22 @@ dependencies = [ { name = "pydantic-settings" }, { name = "python-dotenv" }, { name = "uipath" }, + { name = "uipath-langchain-client", extra = ["openai"] }, { name = "uipath-runtime" }, ] [package.optional-dependencies] +all = [ + { name = "uipath-langchain-client", extra = ["all"] }, +] +anthropic = [ + { name = "uipath-langchain-client", extra = ["anthropic"] }, +] bedrock = [ - { name = "boto3-stubs" }, - { name = "langchain-aws" }, + { name = "uipath-langchain-client", extra = ["aws"] }, ] vertex = [ - { name = "google-generativeai" }, - { name = "langchain-google-genai" }, + { name = "uipath-langchain-client", extra = ["google", "vertexai"] }, ] [package.dev-dependencies] @@ -3371,17 +3934,12 @@ dev = [ [package.metadata] requires-dist = [ - { name = "boto3-stubs", marker = "extra == 'bedrock'", specifier = ">=1.41.4" }, - { name = "google-generativeai", marker = "extra == 'vertex'", specifier = ">=0.8.0" }, { name = "httpx", specifier = ">=0.27.0" }, { name = "jsonpath-ng", specifier = ">=1.7.0" }, { name = "jsonschema-pydantic-converter", specifier = ">=0.1.8" }, { name = "langchain", specifier = ">=1.0.0,<2.0.0" }, - { name = "langchain-aws", marker = "extra == 'bedrock'", specifier = ">=0.2.35" }, { name = "langchain-core", specifier = ">=1.2.11,<2.0.0" }, - { name = "langchain-google-genai", marker = "extra == 'vertex'", specifier = ">=2.0.0" }, { name = "langchain-mcp-adapters", specifier = "==0.2.1" }, - { name = "langchain-openai", specifier = ">=1.0.0,<2.0.0" }, { name = "langgraph", specifier = ">=1.0.0,<2.0.0" }, { name = "langgraph-checkpoint-sqlite", specifier = ">=3.0.3,<4.0.0" }, { name = "mcp", specifier = "==1.26.0" }, @@ -3389,9 +3947,14 @@ requires-dist = [ { name = "pydantic-settings", specifier = ">=2.6.0" }, { name = "python-dotenv", specifier = ">=1.0.1" }, { name = "uipath", specifier = ">=2.8.35,<2.9.0" }, + { name = "uipath-langchain-client", extras = ["all"], marker = "extra == 'all'", specifier = ">=1.1.7" }, + { name = "uipath-langchain-client", extras = ["anthropic"], marker = "extra == 'anthropic'", specifier = ">=1.1.7" }, + { name = "uipath-langchain-client", extras = ["aws"], marker = "extra == 'bedrock'", specifier = ">=1.1.7" }, + { name = "uipath-langchain-client", extras = ["google", "vertexai"], marker = "extra == 'vertex'", specifier = ">=1.1.7" }, + { name = "uipath-langchain-client", extras = ["openai"], specifier = ">=1.1.7" }, { name = "uipath-runtime", specifier = ">=0.8.6,<0.9.0" }, ] -provides-extras = ["vertex", "bedrock"] +provides-extras = ["bedrock", "anthropic", "vertex", "all"] [package.metadata.requires-dev] dev = [ @@ -3410,24 +3973,72 @@ dev = [ ] [[package]] -name = "uipath-runtime" -version = "0.8.6" +name = "uipath-langchain-client" +version = "1.1.9" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "uipath-core" }, + { name = "langchain" }, + { name = "uipath-llm-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/94/d3/b31a7ab49326a5f5174b971426c136da369277c860e548a170aa44cdae7c/uipath_runtime-0.8.6.tar.gz", hash = "sha256:24f9e6238528d29bb2355945191b48bed5d203f717bd29b4e775687d97b14ed8", size = 109413, upload-time = "2026-02-16T13:19:54.202Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8b/5a/7ac5b644037e13a8cc2bd444bc7757911eb29d97aa2192639d916c342ff7/uipath_langchain_client-1.1.9.tar.gz", hash = "sha256:953c04734c201268c4be5f1a446e425fd3c434ad4eeea06600a748dda9ace560", size = 23575, upload-time = "2026-02-13T21:17:30.721Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/eb/7027c8bc302da693aa350c67b3f6d0848e1e46958d8d5e0f95f3ee266fe7/uipath_runtime-0.8.6-py3-none-any.whl", hash = "sha256:efd1ce5b778994410e3f24b3c8b16af1787b00a5ea8fb8f4a6a9eaea0c6fe202", size = 41773, upload-time = "2026-02-16T13:19:52.408Z" }, + { url = "https://files.pythonhosted.org/packages/50/34/02bc33f30eba02c1313e8066935a7ff5068623aeed25d348f61bb36c43ce/uipath_langchain_client-1.1.9-py3-none-any.whl", hash = "sha256:77670ddc193a926f8b9d3e6c4c73a087712226e350b422dc384802b136742a21", size = 32708, upload-time = "2026-02-13T21:17:28.289Z" }, +] + +[package.optional-dependencies] +all = [ + { name = "anthropic", extra = ["bedrock", "vertex"] }, + { name = "langchain-anthropic" }, + { name = "langchain-aws" }, + { name = "langchain-azure-ai" }, + { name = "langchain-fireworks" }, + { name = "langchain-google-genai" }, + { name = "langchain-google-vertexai" }, + { name = "langchain-openai" }, +] +anthropic = [ + { name = "anthropic", extra = ["bedrock", "vertex"] }, + { name = "langchain-anthropic" }, +] +aws = [ + { name = "langchain-aws" }, +] +google = [ + { name = "langchain-google-genai" }, +] +openai = [ + { name = "langchain-openai" }, +] +vertexai = [ + { name = "langchain-google-vertexai" }, ] [[package]] -name = "uritemplate" -version = "4.2.0" +name = "uipath-llm-client" +version = "1.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/98/60/f174043244c5306c9988380d2cb10009f91563fc4b31293d27e17201af56/uritemplate-4.2.0.tar.gz", hash = "sha256:480c2ed180878955863323eea31b0ede668795de182617fef9c6ca09e6ec9d0e", size = 33267, upload-time = "2025-06-02T15:12:06.318Z" } +dependencies = [ + { name = "httpx" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "tenacity" }, + { name = "uipath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/92/c1/2d192bd2e4049cad37d3185eef0ba66ab6dcab7a0542ac4841a4eaf534cd/uipath_llm_client-1.1.1.tar.gz", hash = "sha256:90b214f4f85ea2711c06422e8e4b954dfbc56a8c24a78c4edc6d071ea38836c0", size = 383504, upload-time = "2026-02-12T12:43:46.246Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/6a/cc01bc2c2a67a12030f73b8a988aee26f381d5f59d9e3bce127099ca9cb7/uipath_llm_client-1.1.1-py3-none-any.whl", hash = "sha256:524644cc387a3e683c26ce1fc12a17ba22d166c08147317798e84b049e38d609", size = 36697, upload-time = "2026-02-12T12:43:47.351Z" }, +] + +[[package]] +name = "uipath-runtime" +version = "0.8.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uipath-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/d3/b31a7ab49326a5f5174b971426c136da369277c860e548a170aa44cdae7c/uipath_runtime-0.8.6.tar.gz", hash = "sha256:24f9e6238528d29bb2355945191b48bed5d203f717bd29b4e775687d97b14ed8", size = 109413, upload-time = "2026-02-16T13:19:54.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/99/3ae339466c9183ea5b8ae87b34c0b897eda475d2aec2307cae60e5cd4f29/uritemplate-4.2.0-py3-none-any.whl", hash = "sha256:962201ba1c4edcab02e60f9a0d3821e82dfc5d2d6662a21abd533879bdb8a686", size = 11488, upload-time = "2025-06-02T15:12:03.405Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/eb/7027c8bc302da693aa350c67b3f6d0848e1e46958d8d5e0f95f3ee266fe7/uipath_runtime-0.8.6-py3-none-any.whl", hash = "sha256:efd1ce5b778994410e3f24b3c8b16af1787b00a5ea8fb8f4a6a9eaea0c6fe202", size = 41773, upload-time = "2026-02-16T13:19:52.408Z" }, ] [[package]] @@ -3470,29 +4081,38 @@ wheels = [ [[package]] name = "uvicorn" -version = "0.40.0" +version = "0.41.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } +sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" }, +] + +[[package]] +name = "validators" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/53/66/a435d9ae49850b2f071f7ebd8119dd4e84872b01630d6736761e6e7fd847/validators-0.35.0.tar.gz", hash = "sha256:992d6c48a4e77c81f1b4daba10d16c3a9bb0dbb79b3a19ea847ff0928e70497a", size = 73399, upload-time = "2025-05-01T05:42:06.7Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/fa/6e/3e955517e22cbdd565f2f8b2e73d52528b14b8bcfdb04f62466b071de847/validators-0.35.0-py3-none-any.whl", hash = "sha256:e8c947097eae7892cb3d26868d637f79f47b4a0554bc6b80065dfe5aac3705dd", size = 44712, upload-time = "2025-05-01T05:42:04.203Z" }, ] [[package]] name = "virtualenv" -version = "20.36.1" +version = "20.37.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, { name = "filelock" }, { name = "platformdirs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/ef/d9d4ce633df789bf3430bd81fb0d8b9d9465dfc1d1f0deb3fb62cd80f5c2/virtualenv-20.37.0.tar.gz", hash = "sha256:6f7e2064ed470aa7418874e70b6369d53b66bcd9e9fd5389763e96b6c94ccb7c", size = 5864710, upload-time = "2026-02-16T16:17:59.42Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/4b/6cf85b485be7ec29db837ec2a1d8cd68bc1147b1abf23d8636c5bd65b3cc/virtualenv-20.37.0-py3-none-any.whl", hash = "sha256:5d3951c32d57232ae3569d4de4cc256c439e045135ebf43518131175d9be435d", size = 5837480, upload-time = "2026-02-16T16:17:57.341Z" }, ] [[package]]