From 3bec51517f5b4b60dbba67de4e2645193ae47a40 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:25:11 +0100 Subject: [PATCH 01/15] Add langchain provider package --- .../server-ai-langchain/README.md | 182 +++++++++++- .../server-ai-langchain/pyproject.toml | 5 +- .../src/ldai_langchain/__init__.py | 11 +- .../src/ldai_langchain/langchain_provider.py | 274 +++++++++++++++++ .../tests/test_langchain_provider.py | 279 ++++++++++++++++++ 5 files changed, 739 insertions(+), 12 deletions(-) create mode 100644 packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py create mode 100644 packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py diff --git a/packages/ai-providers/server-ai-langchain/README.md b/packages/ai-providers/server-ai-langchain/README.md index f612c63..736f7ea 100644 --- a/packages/ai-providers/server-ai-langchain/README.md +++ b/packages/ai-providers/server-ai-langchain/README.md @@ -1,10 +1,8 @@ # LaunchDarkly AI SDK - LangChain Provider -This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK. +[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-langchain.svg)](https://pypi.org/project/launchdarkly-server-sdk-ai-langchain/) -## Status - -🚧 **Coming Soon** - This package is a placeholder for future LangChain integration. +This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK, allowing you to use LangChain models and chains with LaunchDarkly's tracking and configuration capabilities. ## Installation @@ -12,12 +10,186 @@ This package provides LangChain integration for the LaunchDarkly Server-Side AI pip install launchdarkly-server-sdk-ai-langchain ``` +You'll also need to install the LangChain provider packages for the models you want to use: + +```bash +# For OpenAI +pip install langchain-openai + +# For Anthropic +pip install langchain-anthropic + +# For Google +pip install langchain-google-genai +``` + +## Quick Start + +```python +import asyncio +from ldclient import LDClient, Config, Context +from ldai import init +from ldai_langchain import LangChainProvider + +# Initialize LaunchDarkly client +ld_client = LDClient(Config("your-sdk-key")) +ai_client = init(ld_client) + +# Get AI configuration +context = Context.builder("user-123").build() +config = ai_client.config("ai-config-key", context, {}) + +async def main(): + # Create a LangChain provider from the AI configuration + provider = await LangChainProvider.create(config) + + # Use the provider to invoke the model + from ldai.models import LDMessage + messages = [ + LDMessage(role="system", content="You are a helpful assistant."), + LDMessage(role="user", content="Hello, how are you?"), + ] + + response = await provider.invoke_model(messages) + print(response.message.content) + +asyncio.run(main()) +``` + ## Usage +### Using LangChainProvider with the Create Factory + +The simplest way to use the LangChain provider is with the static `create` factory method, which automatically creates the appropriate LangChain model based on your LaunchDarkly AI configuration: + ```python -# Coming soon +from ldai_langchain import LangChainProvider + +# Create provider from AI configuration +provider = await LangChainProvider.create(ai_config) + +# Invoke the model +response = await provider.invoke_model(messages) ``` +### Using an Existing LangChain Model + +If you already have a LangChain model configured, you can use it directly: + +```python +from langchain_openai import ChatOpenAI +from 
ldai_langchain import LangChainProvider + +# Create your own LangChain model +llm = ChatOpenAI(model="gpt-4", temperature=0.7) + +# Wrap it with LangChainProvider +provider = LangChainProvider(llm) + +# Use with LaunchDarkly tracking +response = await provider.invoke_model(messages) +``` + +### Structured Output + +The provider supports structured output using LangChain's `with_structured_output`: + +```python +response_structure = { + "type": "object", + "properties": { + "sentiment": {"type": "string", "enum": ["positive", "negative", "neutral"]}, + "confidence": {"type": "number"}, + }, + "required": ["sentiment", "confidence"], +} + +result = await provider.invoke_structured_model(messages, response_structure) +print(result.data) # {"sentiment": "positive", "confidence": 0.95} +``` + +### Tracking Metrics + +Use the provider with LaunchDarkly's tracking capabilities: + +```python +# Get the AI config with tracker +config = ai_client.config("ai-config-key", context, {}) + +# Create provider +provider = await LangChainProvider.create(config) + +# Track metrics automatically +async def invoke(): + return await provider.invoke_model(messages) + +response = await config.tracker.track_metrics_of( + invoke, + lambda r: r.metrics +) +``` + +### Static Utility Methods + +The `LangChainProvider` class provides several utility methods: + +#### Converting Messages + +```python +from ldai.models import LDMessage +from ldai_langchain import LangChainProvider + +messages = [ + LDMessage(role="system", content="You are helpful."), + LDMessage(role="user", content="Hello!"), +] + +# Convert to LangChain messages +langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) +``` + +#### Extracting Metrics + +```python +from ldai_langchain import LangChainProvider + +# After getting a response from LangChain +metrics = LangChainProvider.get_ai_metrics_from_response(ai_message) +print(f"Success: {metrics.success}") +print(f"Tokens used: {metrics.usage.total if metrics.usage else 'N/A'}") +``` + +#### Provider Name Mapping + +```python +# Map LaunchDarkly provider names to LangChain provider names +langchain_provider = LangChainProvider.map_provider("gemini") # Returns "google-genai" +``` + +## API Reference + +### LangChainProvider + +#### Constructor + +```python +LangChainProvider(llm: BaseChatModel, logger: Optional[Any] = None) +``` + +#### Static Methods + +- `create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> LangChainProvider` - Factory method to create a provider from AI configuration +- `convert_messages_to_langchain(messages: List[LDMessage]) -> List[BaseMessage]` - Convert LaunchDarkly messages to LangChain messages +- `get_ai_metrics_from_response(response: AIMessage) -> LDAIMetrics` - Extract metrics from a LangChain response +- `map_provider(ld_provider_name: str) -> str` - Map LaunchDarkly provider names to LangChain names +- `create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel` - Create a LangChain model from AI configuration + +#### Instance Methods + +- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages +- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke with structured output +- `get_chat_model() -> BaseChatModel` - Get the underlying LangChain model + ## Documentation For full documentation, please refer to the [LaunchDarkly AI SDK documentation](https://docs.launchdarkly.com/sdk/ai/python). 
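+### Error Handling
+
+`invoke_model` and `invoke_structured_model` do not raise on provider failures: they
+log a warning and return a response whose `metrics.success` is `False` (with an empty
+message or empty data), so check the metrics before using the content. A minimal
+sketch of that pattern, using only the API shown above:
+
+```python
+response = await provider.invoke_model(messages)
+
+if response.metrics.success:
+    print(response.message.content)
+else:
+    # The failure was already logged by the provider; fall back or retry here.
+    print("Model invocation failed")
+```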
diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index db792e1..c5e3825 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -25,8 +25,8 @@ packages = [{ include = "ldai_langchain", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" launchdarkly-server-sdk-ai = ">=0.10.1" -# langchain-core = ">=0.1.0" # Uncomment when implementing - +langchain-core = ">=0.2.0" +langchain = ">=0.2.0" [tool.poetry.group.dev.dependencies] pytest = ">=2.8" @@ -44,6 +44,7 @@ non_interactive = true [tool.pytest.ini_options] addopts = ["-ra"] testpaths = ["tests"] +asyncio_mode = "auto" [build-system] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py index bf15780..6a5b5d9 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py @@ -1,14 +1,15 @@ """LaunchDarkly AI SDK - LangChain Provider. -This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK. +This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK, +allowing you to use LangChain models and chains with LaunchDarkly's tracking and +configuration capabilities. """ -__version__ = "0.1.0" +from ldai_langchain.langchain_provider import LangChainProvider -# Placeholder for future LangChain provider implementation -# from ldai_langchain.langchain_provider import LangChainProvider +__version__ = "0.1.0" __all__ = [ '__version__', - # 'LangChainProvider', # Uncomment when implemented + 'LangChainProvider', ] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py new file mode 100644 index 0000000..4388401 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -0,0 +1,274 @@ +"""LangChain implementation of AIProvider for LaunchDarkly AI SDK.""" + +from typing import Any, Dict, List, Optional, Union + +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, BaseMessage + +from ldai.models import AIConfigKind, LDMessage +from ldai.providers.ai_provider import AIProvider +from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse +from ldai.tracker import TokenUsage + + +class LangChainProvider(AIProvider): + """ + LangChain implementation of AIProvider. + + This provider integrates LangChain models with LaunchDarkly's tracking capabilities. + """ + + def __init__(self, llm: BaseChatModel, logger: Optional[Any] = None): + """ + Initialize the LangChain provider. + + :param llm: A LangChain BaseChatModel instance + :param logger: Optional logger for logging provider operations + """ + super().__init__(logger) + self._llm = llm + + + @staticmethod + async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'LangChainProvider': + """ + Static factory method to create a LangChain AIProvider from an AI configuration. 
+ + :param ai_config: The LaunchDarkly AI configuration + :param logger: Optional logger for the provider + :return: Configured LangChainProvider instance + """ + llm = await LangChainProvider.create_langchain_model(ai_config) + return LangChainProvider(llm, logger) + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the LangChain model with an array of messages. + + :param messages: Array of LDMessage objects representing the conversation + :return: ChatResponse containing the model's response and metrics + """ + try: + # Convert LDMessage[] to LangChain messages + langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + + # Get the LangChain response + response: AIMessage = await self._llm.ainvoke(langchain_messages) + + # Generate metrics early (assumes success by default) + metrics = LangChainProvider.get_ai_metrics_from_response(response) + + # Extract text content from the response + content: str = '' + if isinstance(response.content, str): + content = response.content + else: + # Log warning for non-string content (likely multimodal) + if self.logger: + self.logger.warn( + f'Multimodal response not supported, expecting a string. ' + f'Content type: {type(response.content)}, Content: {response.content}' + ) + # Update metrics to reflect content loss + metrics = LDAIMetrics(success=False, usage=metrics.usage) + + # Create the assistant message + assistant_message = LDMessage(role='assistant', content=content) + + return ChatResponse( + message=assistant_message, + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'LangChain model invocation failed: {error}') + + return ChatResponse( + message=LDMessage(role='assistant', content=''), + metrics=LDAIMetrics(success=False, usage=None), + ) + + async def invoke_structured_model( + self, + messages: List[LDMessage], + response_structure: Dict[str, Any], + ) -> StructuredResponse: + """ + Invoke the LangChain model with structured output support. + + :param messages: Array of LDMessage objects representing the conversation + :param response_structure: Dictionary defining the output structure + :return: StructuredResponse containing the structured data + """ + try: + # Convert LDMessage[] to LangChain messages + langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + + # Get the LangChain response with structured output + structured_llm = self._llm.with_structured_output(response_structure) + response = await structured_llm.ainvoke(langchain_messages) + + # Using structured output doesn't support metrics + metrics = LDAIMetrics( + success=True, + usage=TokenUsage(total=0, input=0, output=0), + ) + + # Handle response serialization + if isinstance(response, dict): + raw_response = str(response) + else: + import json + try: + raw_response = json.dumps(response) + except (TypeError, ValueError): + raw_response = str(response) + + return StructuredResponse( + data=response if isinstance(response, dict) else {'result': response}, + raw_response=raw_response, + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'LangChain structured model invocation failed: {error}') + + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics( + success=False, + usage=TokenUsage(total=0, input=0, output=0), + ), + ) + + def get_chat_model(self) -> BaseChatModel: + """ + Get the underlying LangChain model instance. 
+ + :return: The underlying BaseChatModel + """ + return self._llm + + # ============================================================================= + # STATIC UTILITY METHODS + # ============================================================================= + + @staticmethod + def map_provider(ld_provider_name: str) -> str: + """ + Map LaunchDarkly provider names to LangChain provider names. + + This method enables seamless integration between LaunchDarkly's standardized + provider naming and LangChain's naming conventions. + + :param ld_provider_name: LaunchDarkly provider name + :return: LangChain-compatible provider name + """ + lowercased_name = ld_provider_name.lower() + + mapping: Dict[str, str] = { + 'gemini': 'google-genai', + } + + return mapping.get(lowercased_name, lowercased_name) + + @staticmethod + def get_ai_metrics_from_response(response: AIMessage) -> LDAIMetrics: + """ + Get AI metrics from a LangChain provider response. + + This method extracts token usage information and success status from LangChain responses + and returns a LaunchDarkly AIMetrics object. + + :param response: The response from the LangChain model + :return: LDAIMetrics with success status and token usage + + Example: + # Use with tracker.track_metrics_of for automatic tracking + response = await tracker.track_metrics_of( + lambda: llm.ainvoke(messages), + LangChainProvider.get_ai_metrics_from_response + ) + """ + # Extract token usage if available + usage: Optional[TokenUsage] = None + if hasattr(response, 'response_metadata') and response.response_metadata: + token_usage = response.response_metadata.get('tokenUsage') or response.response_metadata.get('token_usage') + if token_usage: + usage = TokenUsage( + total=token_usage.get('totalTokens', 0) or token_usage.get('total_tokens', 0), + input=token_usage.get('promptTokens', 0) or token_usage.get('prompt_tokens', 0), + output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0), + ) + + # LangChain responses that complete successfully are considered successful by default + return LDAIMetrics(success=True, usage=usage) + + @staticmethod + def create_ai_metrics(langchain_response: AIMessage) -> LDAIMetrics: + """ + Create AI metrics information from a LangChain provider response. + + .. deprecated:: + Use `get_ai_metrics_from_response()` instead. + + :param langchain_response: The response from the LangChain model + :return: LDAIMetrics with success status and token usage + """ + return LangChainProvider.get_ai_metrics_from_response(langchain_response) + + @staticmethod + def convert_messages_to_langchain( + messages: List[LDMessage], + ) -> List[Union[HumanMessage, SystemMessage, AIMessage]]: + """ + Convert LaunchDarkly messages to LangChain messages. + + This helper method enables developers to work directly with LangChain message types + while maintaining compatibility with LaunchDarkly's standardized message format. 
+ + :param messages: List of LDMessage objects + :return: List of LangChain message objects + :raises ValueError: If an unsupported message role is encountered + """ + result: List[Union[HumanMessage, SystemMessage, AIMessage]] = [] + + for msg in messages: + if msg.role == 'system': + result.append(SystemMessage(content=msg.content)) + elif msg.role == 'user': + result.append(HumanMessage(content=msg.content)) + elif msg.role == 'assistant': + result.append(AIMessage(content=msg.content)) + else: + raise ValueError(f'Unsupported message role: {msg.role}') + + return result + + @staticmethod + async def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: + """ + Create a LangChain model from an AI configuration. + + This public helper method enables developers to initialize their own LangChain models + using LaunchDarkly AI configurations. + + :param ai_config: The LaunchDarkly AI configuration + :return: A configured LangChain BaseChatModel + """ + from langchain.chat_models import init_chat_model + + model_name = ai_config.model.name if ai_config.model else '' + provider = ai_config.provider.name if ai_config.provider else '' + parameters = {} + + if ai_config.model and hasattr(ai_config.model, '_parameters') and ai_config.model._parameters: + parameters = ai_config.model._parameters.copy() + + # Use LangChain's universal init_chat_model to support multiple providers + return init_chat_model( + model_name, + model_provider=LangChainProvider.map_provider(provider), + **parameters, + ) diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py new file mode 100644 index 0000000..2d75494 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -0,0 +1,279 @@ +"""Tests for LangChain Provider.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage + +from ldai_langchain import LangChainProvider +from ldai.models import LDMessage +from ldai.tracker import TokenUsage + + +class TestConvertMessagesToLangchain: + """Tests for convert_messages_to_langchain static method.""" + + def test_converts_system_messages_to_system_message(self): + """Should convert system messages to SystemMessage.""" + messages = [LDMessage(role='system', content='You are a helpful assistant.')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], SystemMessage) + assert result[0].content == 'You are a helpful assistant.' + + def test_converts_user_messages_to_human_message(self): + """Should convert user messages to HumanMessage.""" + messages = [LDMessage(role='user', content='Hello, how are you?')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], HumanMessage) + assert result[0].content == 'Hello, how are you?' + + def test_converts_assistant_messages_to_ai_message(self): + """Should convert assistant messages to AIMessage.""" + messages = [LDMessage(role='assistant', content='I am doing well, thank you!')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], AIMessage) + assert result[0].content == 'I am doing well, thank you!' 
+ + def test_converts_multiple_messages_in_order(self): + """Should convert multiple messages in order.""" + messages = [ + LDMessage(role='system', content='You are a helpful assistant.'), + LDMessage(role='user', content='What is the weather like?'), + LDMessage(role='assistant', content='I cannot check the weather.'), + ] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 3 + assert isinstance(result[0], SystemMessage) + assert isinstance(result[1], HumanMessage) + assert isinstance(result[2], AIMessage) + + def test_throws_error_for_unsupported_message_role(self): + """Should throw error for unsupported message role.""" + # Create a mock message with unsupported role + class MockMessage: + role = 'unknown' + content = 'Test message' + + with pytest.raises(ValueError, match='Unsupported message role: unknown'): + LangChainProvider.convert_messages_to_langchain([MockMessage()]) # type: ignore + + def test_handles_empty_message_array(self): + """Should handle empty message array.""" + result = LangChainProvider.convert_messages_to_langchain([]) + assert len(result) == 0 + + +class TestGetAIMetricsFromResponse: + """Tests for get_ai_metrics_from_response static method.""" + + def test_creates_metrics_with_success_true_and_token_usage(self): + """Should create metrics with success=True and token usage.""" + mock_response = AIMessage(content='Test response') + mock_response.response_metadata = { + 'tokenUsage': { + 'totalTokens': 100, + 'promptTokens': 50, + 'completionTokens': 50, + }, + } + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + assert result.usage.input == 50 + assert result.usage.output == 50 + + def test_creates_metrics_with_snake_case_token_usage(self): + """Should create metrics with snake_case token usage keys.""" + mock_response = AIMessage(content='Test response') + mock_response.response_metadata = { + 'token_usage': { + 'total_tokens': 150, + 'prompt_tokens': 75, + 'completion_tokens': 75, + }, + } + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 150 + assert result.usage.input == 75 + assert result.usage.output == 75 + + def test_creates_metrics_with_success_true_and_no_usage_when_metadata_missing(self): + """Should create metrics with success=True and no usage when metadata is missing.""" + mock_response = AIMessage(content='Test response') + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is None + + +class TestMapProvider: + """Tests for map_provider static method.""" + + def test_maps_gemini_to_google_genai(self): + """Should map gemini to google-genai.""" + assert LangChainProvider.map_provider('gemini') == 'google-genai' + assert LangChainProvider.map_provider('Gemini') == 'google-genai' + assert LangChainProvider.map_provider('GEMINI') == 'google-genai' + + def test_returns_provider_name_unchanged_for_unmapped_providers(self): + """Should return provider name unchanged for unmapped providers.""" + assert LangChainProvider.map_provider('openai') == 'openai' + assert LangChainProvider.map_provider('anthropic') == 'anthropic' + assert LangChainProvider.map_provider('unknown') == 'unknown' + + +class TestInvokeModel: + """Tests for invoke_model instance method.""" + + @pytest.fixture + def 
mock_llm(self): + """Create a mock LLM.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_returns_success_true_for_string_content(self, mock_llm, mock_logger): + """Should return success=True for string content.""" + mock_response = AIMessage(content='Test response') + mock_llm.ainvoke = AsyncMock(return_value=mock_response) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is True + assert result.message.content == 'Test response' + mock_logger.warn.assert_not_called() + + @pytest.mark.asyncio + async def test_returns_success_false_for_non_string_content_and_logs_warning(self, mock_llm, mock_logger): + """Should return success=False for non-string content and log warning.""" + mock_response = AIMessage(content=[{'type': 'image', 'data': 'base64data'}]) + mock_llm.ainvoke = AsyncMock(return_value=mock_response) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is False + assert result.message.content == '' + mock_logger.warn.assert_called_once() + + @pytest.mark.asyncio + async def test_returns_success_false_when_model_invocation_throws_error(self, mock_llm, mock_logger): + """Should return success=False when model invocation throws an error.""" + error = Exception('Model invocation failed') + mock_llm.ainvoke = AsyncMock(side_effect=error) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is False + assert result.message.content == '' + assert result.message.role == 'assistant' + mock_logger.warn.assert_called() + + +class TestInvokeStructuredModel: + """Tests for invoke_structured_model instance method.""" + + @pytest.fixture + def mock_llm(self): + """Create a mock LLM.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_returns_success_true_for_successful_invocation(self, mock_llm, mock_logger): + """Should return success=True for successful invocation.""" + mock_response = {'result': 'structured data'} + mock_structured_llm = MagicMock() + mock_structured_llm.ainvoke = AsyncMock(return_value=mock_response) + mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + response_structure = {'type': 'object', 'properties': {}} + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.metrics.success is True + assert result.data == mock_response + mock_logger.warn.assert_not_called() + + @pytest.mark.asyncio + async def test_returns_success_false_when_structured_model_invocation_throws_error(self, mock_llm, mock_logger): + """Should return success=False when structured model invocation throws an error.""" + error = Exception('Structured invocation failed') + mock_structured_llm = MagicMock() + mock_structured_llm.ainvoke = AsyncMock(side_effect=error) + mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) + provider = LangChainProvider(mock_llm, 
mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + response_structure = {'type': 'object', 'properties': {}} + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.metrics.success is False + assert result.data == {} + assert result.raw_response == '' + assert result.metrics.usage is not None + assert result.metrics.usage.total == 0 + mock_logger.warn.assert_called() + + +class TestGetChatModel: + """Tests for get_chat_model instance method.""" + + def test_returns_underlying_llm(self): + """Should return the underlying LLM.""" + mock_llm = MagicMock() + provider = LangChainProvider(mock_llm) + + assert provider.get_chat_model() is mock_llm + + +class TestCreateAIMetrics: + """Tests for deprecated create_ai_metrics static method.""" + + def test_delegates_to_get_ai_metrics_from_response(self): + """Should delegate to get_ai_metrics_from_response.""" + mock_response = AIMessage(content='Test response') + mock_response.response_metadata = { + 'tokenUsage': { + 'totalTokens': 100, + 'promptTokens': 50, + 'completionTokens': 50, + }, + } + + result = LangChainProvider.create_ai_metrics(mock_response) + expected = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success == expected.success + assert result.usage.total == expected.usage.total + assert result.usage.input == expected.usage.input + assert result.usage.output == expected.usage.output From 127034d5e771cb31e665caf1fbd3e3b5f7fef7c9 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:31:20 +0100 Subject: [PATCH 02/15] add ci testing for langchain --- .github/workflows/ci.yml | 46 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1980dd..87549b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,3 +58,49 @@ jobs: - name: Run tests run: make -C packages/sdk/server-ai test + + server-ai-langchain-linux: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-langchain + python_version: ${{ matrix.python-version }} + + - uses: ./.github/actions/build + with: + workspace_path: packages/ai-providers/server-ai-langchain + + server-ai-langchain-windows: + runs-on: windows-latest + defaults: + run: + shell: powershell + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - name: Install requirements + working-directory: packages/ai-providers/server-ai-langchain + run: poetry install + + - name: Run tests + run: make -C packages/ai-providers/server-ai-langchain test From d95733690eb375adda011ace5c16d4e5665f301e Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:33:29 +0100 Subject: [PATCH 03/15] fix annotation. 
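BaseChatModel.ainvoke is typed to return BaseMessage rather than AIMessage, so the
response and metrics annotations are widened to match. AIMessage values still satisfy
the widened annotation because AIMessage subclasses BaseMessage; a minimal
illustration, assuming langchain-core's message types:

    from langchain_core.messages import AIMessage, BaseMessage

    msg: BaseMessage = AIMessage(content='hi')  # subclass assignment is valid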
--- .../src/ldai_langchain/langchain_provider.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index 4388401..1497440 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -53,7 +53,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) # Get the LangChain response - response: AIMessage = await self._llm.ainvoke(langchain_messages) + response: BaseMessage = await self._llm.ainvoke(langchain_messages) # Generate metrics early (assumes success by default) metrics = LangChainProvider.get_ai_metrics_from_response(response) @@ -174,7 +174,7 @@ def map_provider(ld_provider_name: str) -> str: return mapping.get(lowercased_name, lowercased_name) @staticmethod - def get_ai_metrics_from_response(response: AIMessage) -> LDAIMetrics: + def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: """ Get AI metrics from a LangChain provider response. @@ -206,7 +206,7 @@ def get_ai_metrics_from_response(response: AIMessage) -> LDAIMetrics: return LDAIMetrics(success=True, usage=usage) @staticmethod - def create_ai_metrics(langchain_response: AIMessage) -> LDAIMetrics: + def create_ai_metrics(langchain_response: BaseMessage) -> LDAIMetrics: """ Create AI metrics information from a LangChain provider response. From 3a5e5d43e92c20922569fbcd131699957e1c44ed Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:36:05 +0100 Subject: [PATCH 04/15] fix pycodestyle --- packages/ai-providers/server-ai-langchain/Makefile | 1 + packages/ai-providers/server-ai-langchain/pyproject.toml | 2 ++ packages/ai-providers/server-ai-langchain/setup.cfg | 2 ++ 3 files changed, 5 insertions(+) create mode 100644 packages/ai-providers/server-ai-langchain/setup.cfg diff --git a/packages/ai-providers/server-ai-langchain/Makefile b/packages/ai-providers/server-ai-langchain/Makefile index 2486d17..ca02807 100644 --- a/packages/ai-providers/server-ai-langchain/Makefile +++ b/packages/ai-providers/server-ai-langchain/Makefile @@ -20,6 +20,7 @@ test: install lint: #! 
Run type analysis and linting checks lint: install poetry run mypy src/ldai_langchain + poetry run isort --check --atomic src/ldai_langchain poetry run pycodestyle src/ldai_langchain .PHONY: build diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index c5e3825..c0adbbd 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -33,6 +33,8 @@ pytest = ">=2.8" pytest-cov = ">=2.4.0" pytest-asyncio = ">=0.21.0" mypy = "==1.18.2" +pycodestyle = ">=2.11.0" +isort = ">=5.12.0" [tool.mypy] python_version = "3.9" diff --git a/packages/ai-providers/server-ai-langchain/setup.cfg b/packages/ai-providers/server-ai-langchain/setup.cfg new file mode 100644 index 0000000..6224f31 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/setup.cfg @@ -0,0 +1,2 @@ +[pycodestyle] +max-line-length = 120 From a743ecfe6ce6743a663a8c1ed4f4ce8c61a772a1 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:38:06 +0100 Subject: [PATCH 05/15] fix imports --- .../src/ldai_langchain/langchain_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index 1497440..63221fc 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union from langchain_core.language_models.chat_models import BaseChatModel -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, BaseMessage +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage from ldai.models import AIConfigKind, LDMessage from ldai.providers.ai_provider import AIProvider From 550294ff4041f2ccc09b490a4dffcd2174f1f95e Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:43:03 +0100 Subject: [PATCH 06/15] Update imports, add isort configuration, and improve type aliasing for better clarity and organization. 
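The published ldai package does not yet export the AIConfigKind union, so an
equivalent alias is defined locally, mirroring the one in ldai.models:

    from typing import Union

    from ldai import AIAgentConfig, AICompletionConfig, AIJudgeConfig

    AIConfigKind = Union[AIAgentConfig, AICompletionConfig, AIJudgeConfig]

The isort configuration uses the black profile and pins the langchain and ldai
packages as known third-party so import grouping does not depend on what happens
to be installed in the lint environment.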
--- .../ai-providers/server-ai-langchain/pyproject.toml | 5 +++++ .../src/ldai_langchain/langchain_provider.py | 12 ++++++++++-- .../tests/test_langchain_provider.py | 6 +++--- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index c0adbbd..c99c518 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -42,6 +42,11 @@ ignore_missing_imports = true install_types = true non_interactive = true +[tool.isort] +profile = "black" +known_third_party = ["langchain", "langchain_core", "ldai"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] + [tool.pytest.ini_options] addopts = ["-ra"] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index 63221fc..b0309ff 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -5,11 +5,19 @@ from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage -from ldai.models import AIConfigKind, LDMessage -from ldai.providers.ai_provider import AIProvider +from ldai import ( + AIAgentConfig, + AICompletionConfig, + AIJudgeConfig, + LDMessage, +) +from ldai.providers import AIProvider from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse from ldai.tracker import TokenUsage +# Type alias matching the one in ldai.models +AIConfigKind = Union[AIAgentConfig, AICompletionConfig, AIJudgeConfig] + class LangChainProvider(AIProvider): """ diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 2d75494..0f9b30f 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -1,13 +1,13 @@ """Tests for LangChain Provider.""" import pytest -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock from langchain_core.messages import AIMessage, HumanMessage, SystemMessage +from ldai import LDMessage + from ldai_langchain import LangChainProvider -from ldai.models import LDMessage -from ldai.tracker import TokenUsage class TestConvertMessagesToLangchain: From 717c80ba723886fdc6de7566be726d11c9bb18c1 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 11:59:59 +0100 Subject: [PATCH 07/15] fix installation order --- .github/workflows/ci.yml | 7 +++++++ packages/ai-providers/server-ai-langchain/pyproject.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 87549b3..3584e65 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,6 +98,13 @@ jobs: - name: Install poetry uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + - name: Configure poetry for local virtualenvs + run: poetry config virtualenvs.in-project true + + - name: Install server-ai dependency first + working-directory: packages/sdk/server-ai + run: poetry install + - name: Install requirements working-directory: 
packages/ai-providers/server-ai-langchain run: poetry install diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index c99c518..dbfb062 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -24,7 +24,7 @@ packages = [{ include = "ldai_langchain", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.10.1" +launchdarkly-server-sdk-ai = { path = "../../sdk/server-ai", develop = true } langchain-core = ">=0.2.0" langchain = ">=0.2.0" From 7e21a9887726b17ec4fc2c88e76b3065f9e38771 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 12:14:13 +0100 Subject: [PATCH 08/15] fix lint --- .../src/ldai_langchain/langchain_provider.py | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index b0309ff..b0a32f2 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -4,7 +4,6 @@ from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage - from ldai import ( AIAgentConfig, AICompletionConfig, @@ -22,7 +21,7 @@ class LangChainProvider(AIProvider): """ LangChain implementation of AIProvider. - + This provider integrates LangChain models with LaunchDarkly's tracking capabilities. """ @@ -36,7 +35,6 @@ def __init__(self, llm: BaseChatModel, logger: Optional[Any] = None): super().__init__(logger) self._llm = llm - @staticmethod async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'LangChainProvider': """ @@ -166,7 +164,7 @@ def get_chat_model(self) -> BaseChatModel: def map_provider(ld_provider_name: str) -> str: """ Map LaunchDarkly provider names to LangChain provider names. - + This method enables seamless integration between LaunchDarkly's standardized provider naming and LangChain's naming conventions. @@ -185,7 +183,7 @@ def map_provider(ld_provider_name: str) -> str: def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: """ Get AI metrics from a LangChain provider response. - + This method extracts token usage information and success status from LangChain responses and returns a LaunchDarkly AIMetrics object. @@ -217,7 +215,7 @@ def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: def create_ai_metrics(langchain_response: BaseMessage) -> LDAIMetrics: """ Create AI metrics information from a LangChain provider response. - + .. deprecated:: Use `get_ai_metrics_from_response()` instead. @@ -232,7 +230,7 @@ def convert_messages_to_langchain( ) -> List[Union[HumanMessage, SystemMessage, AIMessage]]: """ Convert LaunchDarkly messages to LangChain messages. - + This helper method enables developers to work directly with LangChain message types while maintaining compatibility with LaunchDarkly's standardized message format. 
@@ -241,7 +239,7 @@ def convert_messages_to_langchain( :raises ValueError: If an unsupported message role is encountered """ result: List[Union[HumanMessage, SystemMessage, AIMessage]] = [] - + for msg in messages: if msg.role == 'system': result.append(SystemMessage(content=msg.content)) @@ -251,14 +249,14 @@ def convert_messages_to_langchain( result.append(AIMessage(content=msg.content)) else: raise ValueError(f'Unsupported message role: {msg.role}') - + return result @staticmethod async def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: """ Create a LangChain model from an AI configuration. - + This public helper method enables developers to initialize their own LangChain models using LaunchDarkly AI configurations. @@ -270,7 +268,7 @@ async def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: model_name = ai_config.model.name if ai_config.model else '' provider = ai_config.provider.name if ai_config.provider else '' parameters = {} - + if ai_config.model and hasattr(ai_config.model, '_parameters') and ai_config.model._parameters: parameters = ai_config.model._parameters.copy() From e2465ba4c6842457542feb108adbbaef0fb1eef3 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 12:30:17 +0100 Subject: [PATCH 09/15] fixes --- .../server-ai-langchain/pyproject.toml | 3 +++ .../src/ldai_langchain/langchain_provider.py | 15 ++++----------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index dbfb062..5918a85 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -24,6 +24,9 @@ packages = [{ include = "ldai_langchain", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" +# TODO: Before publishing, change to PyPI version: launchdarkly-server-sdk-ai = ">=X.Y.Z" +# Path dependency is used for development because the published version doesn't yet export +# the required types (LDMessage, AIConfigKind, etc.). Release server-ai first, then update. 
launchdarkly-server-sdk-ai = { path = "../../sdk/server-ai", develop = true } langchain-core = ">=0.2.0" langchain = ">=0.2.0" diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index b0a32f2..2e66530 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -4,19 +4,12 @@ from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage -from ldai import ( - AIAgentConfig, - AICompletionConfig, - AIJudgeConfig, - LDMessage, -) +from ldai import LDMessage +from ldai.models import AIConfigKind from ldai.providers import AIProvider from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse from ldai.tracker import TokenUsage -# Type alias matching the one in ldai.models -AIConfigKind = Union[AIAgentConfig, AICompletionConfig, AIJudgeConfig] - class LangChainProvider(AIProvider): """ @@ -44,7 +37,7 @@ async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'Lang :param logger: Optional logger for the provider :return: Configured LangChainProvider instance """ - llm = await LangChainProvider.create_langchain_model(ai_config) + llm = LangChainProvider.create_langchain_model(ai_config) return LangChainProvider(llm, logger) async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: @@ -253,7 +246,7 @@ def convert_messages_to_langchain( return result @staticmethod - async def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: + def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: """ Create a LangChain model from an AI configuration. From cf8c57d613379fb3849901826550195ea636d222 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 22:10:32 +0100 Subject: [PATCH 10/15] Update launchdarkly-server-sdk-ai dependency to version 0.11.0 --- packages/ai-providers/server-ai-langchain/pyproject.toml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index 5918a85..0517f03 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -24,10 +24,7 @@ packages = [{ include = "ldai_langchain", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" -# TODO: Before publishing, change to PyPI version: launchdarkly-server-sdk-ai = ">=X.Y.Z" -# Path dependency is used for development because the published version doesn't yet export -# the required types (LDMessage, AIConfigKind, etc.). Release server-ai first, then update. 
-launchdarkly-server-sdk-ai = { path = "../../sdk/server-ai", develop = true } +launchdarkly-server-sdk-ai = ">=0.11.0" langchain-core = ">=0.2.0" langchain = ">=0.2.0" From 7ba944048a6c197280b353b432923ab541326843 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 22:14:17 +0100 Subject: [PATCH 11/15] Remove deprecated create_ai_metrics method --- .../src/ldai_langchain/langchain_provider.py | 13 ------------ .../tests/test_langchain_provider.py | 21 ------------------- 2 files changed, 34 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index 2e66530..41b7c28 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -204,19 +204,6 @@ def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: # LangChain responses that complete successfully are considered successful by default return LDAIMetrics(success=True, usage=usage) - @staticmethod - def create_ai_metrics(langchain_response: BaseMessage) -> LDAIMetrics: - """ - Create AI metrics information from a LangChain provider response. - - .. deprecated:: - Use `get_ai_metrics_from_response()` instead. - - :param langchain_response: The response from the LangChain model - :return: LDAIMetrics with success status and token usage - """ - return LangChainProvider.get_ai_metrics_from_response(langchain_response) - @staticmethod def convert_messages_to_langchain( messages: List[LDMessage], diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 0f9b30f..af270d6 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -256,24 +256,3 @@ def test_returns_underlying_llm(self): assert provider.get_chat_model() is mock_llm -class TestCreateAIMetrics: - """Tests for deprecated create_ai_metrics static method.""" - - def test_delegates_to_get_ai_metrics_from_response(self): - """Should delegate to get_ai_metrics_from_response.""" - mock_response = AIMessage(content='Test response') - mock_response.response_metadata = { - 'tokenUsage': { - 'totalTokens': 100, - 'promptTokens': 50, - 'completionTokens': 50, - }, - } - - result = LangChainProvider.create_ai_metrics(mock_response) - expected = LangChainProvider.get_ai_metrics_from_response(mock_response) - - assert result.success == expected.success - assert result.usage.total == expected.usage.total - assert result.usage.input == expected.usage.input - assert result.usage.output == expected.usage.output From 5b401a5ec7f1c8dd4d7c6e921962312e0fa20d0f Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo <44273770+edwinokonkwo@users.noreply.github.com> Date: Fri, 19 Dec 2025 22:15:00 +0100 Subject: [PATCH 12/15] Update packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py Co-authored-by: Jason Bailey --- .../server-ai-langchain/src/ldai_langchain/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py index 6a5b5d9..1282648 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py +++ 
b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py
@@ -1,8 +1,6 @@
 """LaunchDarkly AI SDK - LangChain Provider.
 
-This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK,
-allowing you to use LangChain models and chains with LaunchDarkly's tracking and
-configuration capabilities.
+This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK.
 """
 
 from ldai_langchain.langchain_provider import LangChainProvider

From 93e6a8efae6071e8685310e1b071bdc1b2907ff6 Mon Sep 17 00:00:00 2001
From: Edwin Okonkwo
Date: Fri, 19 Dec 2025 22:17:50 +0100
Subject: [PATCH 13/15] remove extra comments.

---
 .../src/ldai_langchain/langchain_provider.py | 23 +------------------
 1 file changed, 1 insertion(+), 22 deletions(-)

diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
index 41b7c28..2c78bcf 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
@@ -48,34 +48,23 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
         :return: ChatResponse containing the model's response and metrics
         """
         try:
-            # Convert LDMessage[] to LangChain messages
             langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
-
-            # Get the LangChain response
             response: BaseMessage = await self._llm.ainvoke(langchain_messages)
-
-            # Generate metrics early (assumes success by default)
             metrics = LangChainProvider.get_ai_metrics_from_response(response)
 
-            # Extract text content from the response
             content: str = ''
             if isinstance(response.content, str):
                 content = response.content
             else:
-                # Log warning for non-string content (likely multimodal)
                 if self.logger:
                     self.logger.warn(
                         f'Multimodal response not supported, expecting a string. 
' f'Content type: {type(response.content)}, Content: {response.content}' ) - # Update metrics to reflect content loss metrics = LDAIMetrics(success=False, usage=metrics.usage) - # Create the assistant message - assistant_message = LDMessage(role='assistant', content=content) - return ChatResponse( - message=assistant_message, + message=LDMessage(role='assistant', content=content), metrics=metrics, ) except Exception as error: @@ -100,20 +89,15 @@ async def invoke_structured_model( :return: StructuredResponse containing the structured data """ try: - # Convert LDMessage[] to LangChain messages langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) - - # Get the LangChain response with structured output structured_llm = self._llm.with_structured_output(response_structure) response = await structured_llm.ainvoke(langchain_messages) - # Using structured output doesn't support metrics metrics = LDAIMetrics( success=True, usage=TokenUsage(total=0, input=0, output=0), ) - # Handle response serialization if isinstance(response, dict): raw_response = str(response) else: @@ -149,10 +133,6 @@ def get_chat_model(self) -> BaseChatModel: """ return self._llm - # ============================================================================= - # STATIC UTILITY METHODS - # ============================================================================= - @staticmethod def map_provider(ld_provider_name: str) -> str: """ @@ -201,7 +181,6 @@ def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0), ) - # LangChain responses that complete successfully are considered successful by default return LDAIMetrics(success=True, usage=usage) @staticmethod From f0e8aa02d510f02914165e5b37d356d678e72ef4 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Fri, 19 Dec 2025 22:19:42 +0100 Subject: [PATCH 14/15] update response handling in LangChainProvider to improve error logging and ensure structured output is a dictionary. Update metrics handling accordingly. --- .../src/ldai_langchain/langchain_provider.py | 36 ++++++++++--------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py index 2c78bcf..2fa59b4 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -93,24 +93,28 @@ async def invoke_structured_model( structured_llm = self._llm.with_structured_output(response_structure) response = await structured_llm.ainvoke(langchain_messages) - metrics = LDAIMetrics( - success=True, - usage=TokenUsage(total=0, input=0, output=0), - ) - - if isinstance(response, dict): - raw_response = str(response) - else: - import json - try: - raw_response = json.dumps(response) - except (TypeError, ValueError): - raw_response = str(response) + if not isinstance(response, dict): + if self.logger: + self.logger.warn( + f'Structured output did not return a dict. 
'
+                        f'Got: {type(response)}'
+                    )
+                return StructuredResponse(
+                    data={},
+                    raw_response='',
+                    metrics=LDAIMetrics(
+                        success=False,
+                        usage=TokenUsage(total=0, input=0, output=0),
+                    ),
+                )
 
             return StructuredResponse(
-                data=response if isinstance(response, dict) else {'result': response},
-                raw_response=raw_response,
-                metrics=metrics,
+                data=response,
+                raw_response=str(response),
+                metrics=LDAIMetrics(
+                    success=True,
+                    usage=TokenUsage(total=0, input=0, output=0),
+                ),
             )
         except Exception as error:
             if self.logger:

From d048a374f4d88958f80655fb22575c29055c75e3 Mon Sep 17 00:00:00 2001
From: Edwin Okonkwo
Date: Fri, 19 Dec 2025 22:22:05 +0100
Subject: [PATCH 15/15] rework configuration handling to streamline model and provider extraction from ai_config

---
 .../src/ldai_langchain/langchain_provider.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
index 2fa59b4..d710809 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py
@@ -228,14 +228,14 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
         """
         from langchain.chat_models import init_chat_model
 
-        model_name = ai_config.model.name if ai_config.model else ''
-        provider = ai_config.provider.name if ai_config.provider else ''
-        parameters = {}
+        config_dict = ai_config.to_dict()
+        model_dict = config_dict.get('model') or {}
+        provider_dict = config_dict.get('provider') or {}
 
-        if ai_config.model and hasattr(ai_config.model, '_parameters') and ai_config.model._parameters:
-            parameters = ai_config.model._parameters.copy()
+        model_name = model_dict.get('name', '')
+        provider = provider_dict.get('name', '')
+        parameters = model_dict.get('parameters') or {}
 
-        # Use LangChain's universal init_chat_model to support multiple providers
         return init_chat_model(
             model_name,
             model_provider=LangChainProvider.map_provider(provider),
             **parameters,
         )