Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
5f924ab
move dataclass into models
jsonbailey Nov 7, 2025
951eda1
create new config types completion, agent, and judges
jsonbailey Nov 7, 2025
ae7516b
use inheritance for configs for consistency
jsonbailey Nov 7, 2025
0d933d2
added deprecations for old types
jsonbailey Nov 7, 2025
8271807
create the ai provider interface and factory
jsonbailey Nov 7, 2025
6ee62b4
create a langchain implementation of the ai provider
jsonbailey Nov 7, 2025
231ae2e
Add Judge and evaluation metric tracking
jsonbailey Nov 8, 2025
445ab8c
Add Chat implementation
jsonbailey Nov 8, 2025
5446222
Set a default for evaluation metrics
jsonbailey Nov 8, 2025
bc46608
add the logger
jsonbailey Nov 8, 2025
fd0aff4
adjust langchain import
jsonbailey Nov 8, 2025
c3c939f
fix structure response
jsonbailey Nov 8, 2025
125bb66
judge response should be async
jsonbailey Nov 8, 2025
63b1d9e
fix test
edwinokonkwo Dec 9, 2025
cae7952
fix lint
edwinokonkwo Dec 9, 2025
3ffb55d
fix deps
edwinokonkwo Dec 9, 2025
64bb5f7
remove langchain and comment ref lines for now
edwinokonkwo Dec 9, 2025
86acd6e
simplify
edwinokonkwo Dec 10, 2025
11f7602
add judgeConfigKey
edwinokonkwo Dec 10, 2025
06acc21
strongly type JudgeResponse
edwinokonkwo Dec 10, 2025
84669d5
AIJudge to Judge
edwinokonkwo Dec 10, 2025
d57c4f7
add key to model
edwinokonkwo Dec 15, 2025
351d4f1
fixes
edwinokonkwo Dec 16, 2025
7a699ef
fix linting
edwinokonkwo Dec 16, 2025
8d3bfbb
revert to sync
edwinokonkwo Dec 16, 2025
5de380b
judge should set key for responses
jsonbailey Dec 16, 2025
07c5454
use simplified Chat name
jsonbailey Dec 17, 2025
3c77d76
re-order track_metrics_of params to be more intuitive
jsonbailey Dec 17, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions ldai/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,41 @@
__version__ = "0.10.1" # x-release-please-version

# Public package surface for ldai. Imports are grouped by feature; the
# section comments below label each import on the line directly above it.
# Export chat
from ldai.chat import Chat
# Export main client
from ldai.client import LDAIClient
# Export judge
from ldai.judge import Judge
# Export models for convenience
from ldai.models import ( # Deprecated aliases for backward compatibility
    AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents,
    AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig,
    AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig,
    LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig)
# Export judge types
from ldai.providers.types import EvalScore, JudgeResponse

# Explicit public API; deprecated names are kept for backward compatibility.
__all__ = [
    'LDAIClient',
    'AIAgentConfig',
    'AIAgentConfigDefault',
    'AIAgentConfigRequest',
    'AIAgents',
    'AICompletionConfig',
    'AICompletionConfigDefault',
    'AIJudgeConfig',
    'AIJudgeConfigDefault',
    'Judge',
    'Chat',
    'EvalScore',
    'JudgeConfiguration',
    'JudgeResponse',
    'LDMessage',
    'ModelConfig',
    'ProviderConfig',
    # Deprecated exports
    'AIConfig',
    'LDAIAgent',
    'LDAIAgentConfig',
    'LDAIAgentDefaults',
]
187 changes: 187 additions & 0 deletions ldai/chat/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
"""Chat implementation for managing AI chat conversations."""

import asyncio
from typing import Any, Dict, List, Optional

from ldai.judge import Judge
from ldai.models import AICompletionConfig, LDMessage
from ldai.providers.ai_provider import AIProvider
from ldai.providers.types import ChatResponse, JudgeResponse
from ldai.tracker import LDAIConfigTracker


class Chat:
    """
    Concrete implementation of Chat that provides chat functionality
    by delegating to an AIProvider implementation.

    This class handles conversation management and tracking, while delegating
    the actual model invocation to the provider. When a judge configuration is
    present, judge evaluations are started as background asyncio tasks and
    attached to the response without being awaited.
    """

    def __init__(
        self,
        ai_config: AICompletionConfig,
        tracker: LDAIConfigTracker,
        provider: AIProvider,
        judges: Optional[Dict[str, Judge]] = None,
        logger: Optional[Any] = None,
    ):
        """
        Initialize the Chat.

        :param ai_config: The completion AI configuration
        :param tracker: The tracker for the completion configuration
        :param provider: The AI provider to use for chat
        :param judges: Optional dictionary of judge instances keyed by their configuration keys
        :param logger: Optional logger for logging
        """
        self._ai_config = ai_config
        self._tracker = tracker
        self._provider = provider
        self._judges = judges or {}
        self._logger = logger
        # Conversation history; config messages are NOT stored here — they are
        # prepended on each invocation (see invoke / get_messages).
        self._messages: List[LDMessage] = []

    async def invoke(self, prompt: str) -> ChatResponse:
        """
        Invoke the chat model with a prompt string.

        This method handles conversation management and tracking, delegating to the provider's invoke_model method.

        :param prompt: The user prompt to send to the chat model
        :return: ChatResponse containing the model's response and metrics
        """
        # Convert prompt string to LDMessage with role 'user' and add to conversation history
        user_message: LDMessage = LDMessage(role='user', content=prompt)
        self._messages.append(user_message)

        # Prepend config messages to conversation history for model invocation
        config_messages = self._ai_config.messages or []
        all_messages = config_messages + self._messages

        # Delegate to provider-specific implementation with tracking
        response = await self._tracker.track_metrics_of(
            lambda: self._provider.invoke_model(all_messages),
            lambda result: result.metrics,
        )

        # Start judge evaluations as async tasks (don't await them).
        # A non-empty list is truthy, so no explicit len() check is needed.
        judge_configuration = self._ai_config.judge_configuration
        if judge_configuration and judge_configuration.judges:
            response.evaluations = self._start_judge_evaluations(self._messages, response)

        # Add the response message to conversation history
        self._messages.append(response.message)
        return response

    def _start_judge_evaluations(
        self,
        messages: List[LDMessage],
        response: ChatResponse,
    ) -> List[asyncio.Task[Optional[JudgeResponse]]]:
        """
        Start judge evaluations as async tasks without awaiting them.

        Returns a list of async tasks that can be awaited later.

        :param messages: Array of messages representing the conversation history
        :param response: The AI response to be evaluated
        :return: List of async tasks that will return judge evaluation results
        """
        if not self._ai_config.judge_configuration or not self._ai_config.judge_configuration.judges:
            return []

        judge_configs = self._ai_config.judge_configuration.judges

        # Each coroutine evaluates one configured judge; missing judges are
        # logged and skipped (the task resolves to None).
        async def evaluate_judge(judge_config):
            judge = self._judges.get(judge_config.key)
            if not judge:
                if self._logger:
                    # Logger.warn is a deprecated alias; use warning().
                    self._logger.warning(
                        f"Judge configuration is not enabled: {judge_config.key}",
                    )
                return None

            eval_result = await judge.evaluate_messages(
                messages, response, judge_config.sampling_rate
            )

            # Only successful evaluations are tracked as judge responses.
            if eval_result and eval_result.success:
                self._tracker.track_judge_response(eval_result)

            return eval_result

        # Create tasks for each judge evaluation (requires a running event loop)
        tasks = [
            asyncio.create_task(evaluate_judge(judge_config))
            for judge_config in judge_configs
        ]

        return tasks

    def get_config(self) -> AICompletionConfig:
        """
        Get the underlying AI configuration used to initialize this Chat.

        :return: The AI completion configuration
        """
        return self._ai_config

    def get_tracker(self) -> LDAIConfigTracker:
        """
        Get the underlying AI configuration tracker used to initialize this Chat.

        :return: The tracker instance
        """
        return self._tracker

    def get_provider(self) -> AIProvider:
        """
        Get the underlying AI provider instance.

        This provides direct access to the provider for advanced use cases.

        :return: The AI provider instance
        """
        return self._provider

    def get_judges(self) -> Dict[str, Judge]:
        """
        Get the judges associated with this Chat.

        Returns a dictionary of judge instances keyed by their configuration keys.

        :return: Dictionary of judge instances
        """
        return self._judges

    def append_messages(self, messages: List[LDMessage]) -> None:
        """
        Append messages to the conversation history.

        Adds messages to the conversation history without invoking the model,
        which is useful for managing multi-turn conversations or injecting context.

        :param messages: Array of messages to append to the conversation history
        """
        self._messages.extend(messages)

    def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]:
        """
        Get all messages in the conversation history.

        :param include_config_messages: Whether to include the config messages from the AIConfig.
            Defaults to False.
        :return: Array of messages. When include_config_messages is True, returns both config
            messages and conversation history with config messages prepended. When False,
            returns only the conversation history messages.
        """
        if include_config_messages:
            config_messages = self._ai_config.messages or []
            return config_messages + self._messages
        # Return a defensive copy so callers cannot mutate internal state.
        return list(self._messages)
Loading