Commit 185b3b2

Revert Vercel AI SDK adoption and implement clean abstraction for LLM providers

1 parent: a109aed
13 files changed: +860 −177 lines changed

packages/agent/package.json
Lines changed: 1 addition & 6 deletions

@@ -44,18 +44,13 @@
   "author": "Ben Houston",
   "license": "MIT",
   "dependencies": {
-    "@ai-sdk/anthropic": "^1.1.13",
-    "@ai-sdk/mistral": "^1.1.13",
-    "@ai-sdk/openai": "^1.2.0",
-    "@ai-sdk/xai": "^1.1.12",
+    "@anthropic-ai/sdk": "^0.16.0",
     "@mozilla/readability": "^0.5.0",
     "@playwright/test": "^1.50.1",
     "@vitest/browser": "^3.0.5",
-    "ai": "^4.1.50",
     "chalk": "^5.4.1",
     "dotenv": "^16",
     "jsdom": "^26.0.0",
-    "ollama-ai-provider": "^1.2.0",
     "playwright": "^1.50.1",
     "uuid": "^11",
     "zod": "^3.24.2",

New file — the LLM module's core (exported from the module index as ./core.js)
Lines changed: 125 additions & 0 deletions

@@ -0,0 +1,125 @@
/**
 * Core LLM abstraction for generating text
 */
import { FunctionDefinition, GenerateOptions, LLMResponse, Message, ToolCall } from './types.js';
import { LLMProvider } from './provider.js';

/**
 * Generate text using the specified LLM provider
 *
 * @param provider The LLM provider implementation
 * @param options Options for generation including messages, functions, etc.
 * @returns A response containing generated text and/or tool calls
 */
export async function generateText(
  provider: LLMProvider,
  options: GenerateOptions
): Promise<LLMResponse> {
  // Validate options
  if (!options.messages || options.messages.length === 0) {
    throw new Error('Messages array cannot be empty');
  }

  // Use the provider to generate the response
  return provider.generateText(options);
}

/**
 * Format tool calls for consistent usage across providers
 *
 * @param rawToolCalls Tool calls from provider
 * @returns Normalized tool calls
 */
export function normalizeToolCalls(rawToolCalls: any[]): ToolCall[] {
  if (!rawToolCalls || !Array.isArray(rawToolCalls) || rawToolCalls.length === 0) {
    return [];
  }

  return rawToolCalls.map((call) => {
    // Handle different provider formats
    if (typeof call.arguments === 'string') {
      // Already in correct format
      return {
        id: call.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
        name: call.name || call.function?.name,
        arguments: call.arguments
      };
    } else if (typeof call.arguments === 'object') {
      // Convert object to JSON string
      return {
        id: call.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
        name: call.name || call.function?.name,
        arguments: JSON.stringify(call.arguments)
      };
    } else {
      throw new Error(`Unsupported tool call format: ${JSON.stringify(call)}`);
    }
  });
}

/**
 * Format function definitions for provider compatibility
 *
 * @param functions Function definitions
 * @returns Normalized function definitions
 */
export function normalizeFunctionDefinitions(
  functions?: FunctionDefinition[]
): FunctionDefinition[] {
  if (!functions || functions.length === 0) {
    return [];
  }

  return functions.map((fn) => ({
    name: fn.name,
    description: fn.description,
    parameters: fn.parameters
  }));
}

/**
 * Convert messages to provider-specific format if needed
 *
 * @param messages Array of messages
 * @returns Normalized messages
 */
export function normalizeMessages(messages: Message[]): Message[] {
  return messages.map((msg: any) => {
    // Ensure content is a string
    if (typeof msg.content !== 'string') {
      throw new Error(`Message content must be a string: ${JSON.stringify(msg)}`);
    }

    // Handle each role type explicitly
    switch (msg.role) {
      case 'system':
        return {
          role: 'system',
          content: msg.content
        };
      case 'user':
        return {
          role: 'user',
          content: msg.content
        };
      case 'assistant':
        return {
          role: 'assistant',
          content: msg.content
        };
      case 'tool':
        return {
          role: 'tool',
          content: msg.content,
          name: msg.name || 'unknown_tool' // Ensure name is always present for tool messages
        };
      default:
        // Fall back to treating unknown roles as user messages
        console.warn(`Unexpected message role: ${String(msg.role)}, treating as user message`);
        return {
          role: 'user',
          content: msg.content
        };
    }
  });
}
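
To illustrate the normalization above, here is how normalizeToolCalls behaves on the two raw shapes it accepts (a hypothetical snippet, not part of the commit): object arguments are JSON-stringified, string arguments pass through, and a missing id gets a generated one.

import { normalizeToolCalls } from './core.js';

const normalized = normalizeToolCalls([
  // OpenAI-style call: name nested under `function`, arguments as an object.
  { id: 'call_1', function: { name: 'get_weather' }, arguments: { location: 'New York, NY' } },
  // Pre-stringified arguments with no id.
  { name: 'get_weather', arguments: '{"location":"Boston, MA"}' },
]);

console.log(normalized);
// [
//   { id: 'call_1', name: 'get_weather', arguments: '{"location":"New York, NY"}' },
//   { id: 'tool-<timestamp>-<random>', name: 'get_weather', arguments: '{"location":"Boston, MA"}' }
// ]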

New file — usage examples for the LLM abstraction
Lines changed: 100 additions & 0 deletions

@@ -0,0 +1,100 @@
/**
 * Examples of using the LLM abstraction
 */
import { createProvider, generateText } from './index.js';
import { FunctionDefinition, Message } from './types.js';

/**
 * Example of using the OpenAI provider
 */
async function openaiExample() {
  // Create an OpenAI provider
  const provider = createProvider('openai', 'gpt-4', {
    apiKey: process.env.OPENAI_API_KEY,
  });

  // Define messages
  const messages: Message[] = [
    {
      role: 'system',
      content: 'You are a helpful assistant that can use tools to accomplish tasks.',
    },
    {
      role: 'user',
      content: 'What is the weather in New York?',
    },
  ];

  // Define functions/tools
  const functions: FunctionDefinition[] = [
    {
      name: 'get_weather',
      description: 'Get the current weather in a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA',
          },
          unit: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
            description: 'The unit of temperature',
          },
        },
        required: ['location'],
      },
    },
  ];

  // Generate text
  const response = await generateText(provider, {
    messages,
    functions,
    temperature: 0.7,
    maxTokens: 1000,
  });

  console.log('Generated text:', response.text);
  console.log('Tool calls:', response.toolCalls);

  // Handle tool calls
  if (response.toolCalls.length > 0) {
    const toolCall = response.toolCalls[0];
    if (toolCall) {
      console.log(`Tool called: ${toolCall.name}`);
      console.log(`Arguments: ${toolCall.arguments}`);

      // Example of adding a tool result
      const toolResult: Message = {
        role: 'tool',
        name: toolCall.name,
        content: JSON.stringify({
          temperature: 72,
          unit: 'fahrenheit',
          description: 'Sunny with some clouds',
        }),
      };

      // Continue the conversation with the tool result
      const followupResponse = await generateText(provider, {
        messages: [
          ...messages,
          {
            role: 'assistant',
            content: response.text,
          },
          toolResult,
        ],
        temperature: 0.7,
        maxTokens: 1000,
      });

      console.log('Follow-up response:', followupResponse.text);
    }
  }
}

// Example usage
// openaiExample().catch(console.error);

New file — the module's index.ts barrel export
Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
/**
 * LLM abstraction module
 */

// Export message types
export * from './types.js';

// Export core functionality
export * from './core.js';

// Export provider interface
export * from './provider.js';

// Export provider implementations
export * from './providers/openai.js';
export * from './providers/index.js';

// Re-export the main function for convenience
import { generateText } from './core.js';
export { generateText };

New file — the LLM provider interface (exported from the module index as ./provider.js)
Lines changed: 50 additions & 0 deletions

@@ -0,0 +1,50 @@
/**
 * LLM Provider interface and factory
 */
import { GenerateOptions, LLMResponse, ProviderOptions } from './types.js';

/**
 * Interface for LLM providers
 */
export interface LLMProvider {
  /**
   * Provider name (e.g., 'openai', 'anthropic', etc.)
   */
  name: string;

  /**
   * Provider-specific identifier (e.g., 'openai.chat', 'anthropic.messages', etc.)
   */
  provider: string;

  /**
   * Model name (e.g., 'gpt-4', 'claude-3', etc.)
   */
  model: string;

  /**
   * Generate text using this provider
   *
   * @param options Generation options
   * @returns Response with text and/or tool calls
   */
  generateText(options: GenerateOptions): Promise<LLMResponse>;

  /**
   * Get the number of tokens in a given text
   *
   * @param text Text to count tokens for
   * @returns Number of tokens
   */
  countTokens(text: string): Promise<number>;
}

/**
 * Factory function to create a provider
 *
 * @param providerType Provider type (e.g., 'openai', 'anthropic')
 * @param model Model name
 * @param options Provider-specific options
 * @returns LLM provider instance
 */
export { createProvider, registerProvider } from './providers/index.js';
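
The providers/index.js registry is not shown in this commit view, so the registerProvider factory signature and the ProviderOptions fields used below are assumptions. As a minimal sketch, a custom provider only has to implement the interface above; here is roughly what an Anthropic-backed one could look like, using the @anthropic-ai/sdk dependency added in package.json:

import Anthropic from '@anthropic-ai/sdk';
import { LLMProvider } from './provider.js';
import { GenerateOptions, LLMResponse, ProviderOptions } from './types.js';
import { registerProvider } from './providers/index.js';

// Sketch only: tool calls and system-message handling are omitted,
// and ProviderOptions is assumed to carry an apiKey field.
class AnthropicProvider implements LLMProvider {
  name = 'anthropic';
  provider = 'anthropic.messages';
  private client: Anthropic;

  constructor(public model: string, options: ProviderOptions) {
    this.client = new Anthropic({ apiKey: options.apiKey });
  }

  async generateText(options: GenerateOptions): Promise<LLMResponse> {
    const response = await this.client.messages.create({
      model: this.model,
      max_tokens: options.maxTokens ?? 1024,
      temperature: options.temperature,
      // Anthropic's Messages API takes alternating user/assistant turns.
      messages: options.messages
        .filter((m) => m.role === 'user' || m.role === 'assistant')
        .map((m) => ({ role: m.role as 'user' | 'assistant', content: m.content })),
    });

    // Concatenate the text blocks of the response.
    const text = response.content
      .flatMap((block) => (block.type === 'text' ? [block.text] : []))
      .join('');

    return { text, toolCalls: [] };
  }

  async countTokens(text: string): Promise<number> {
    // Crude approximation (~4 characters per token); a real
    // implementation would use a proper tokenizer.
    return Math.ceil(text.length / 4);
  }
}

// Assumed registry API: register a factory under a provider type so that
// createProvider('anthropic', model, options) can construct it.
registerProvider('anthropic', (model: string, options: ProviderOptions) => new AnthropicProvider(model, options));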
