1 | 1 | import { describe, it, expect } from "bun:test"; |
2 | 2 | import { getPreferredNameModel } from "./workspaceTitleGenerator"; |
3 | | -import type { Config } from "@/node/config"; |
| 3 | +import type { AIService } from "./aiService"; |
| 4 | +import { getKnownModel } from "@/common/constants/knownModels"; |
| 5 | + |
| 6 | +// Helper to create a mock AIService that succeeds for specific models |
| 7 | +function createMockAIService(availableModels: string[]): AIService { |
| 8 | + return { |
| 9 | + createModel: async (modelString: string) => { |
| 10 | + if (availableModels.includes(modelString)) { |
| 11 | + return { success: true, data: {} as never }; |
| 12 | + } |
| 13 | + return { success: false, error: { type: "api_key_not_found", provider: "test" } }; |
| 14 | + }, |
| 15 | + } as unknown as AIService; |
| 16 | +} |
4 | 17 | |
5 | 18 | describe("workspaceTitleGenerator", () => { |
6 | | - it("getPreferredNameModel returns null when no providers configured", () => { |
7 | | - // Save and clear env vars |
8 | | - const savedAnthropicKey = process.env.ANTHROPIC_API_KEY; |
9 | | - const savedAnthropicToken = process.env.ANTHROPIC_AUTH_TOKEN; |
10 | | - delete process.env.ANTHROPIC_API_KEY; |
11 | | - delete process.env.ANTHROPIC_AUTH_TOKEN; |
| 19 | + const HAIKU_ID = getKnownModel("HAIKU").id; |
| 20 | + const GPT_MINI_ID = getKnownModel("GPT_MINI").id; |
12 | 21 | |
13 | | - try { |
14 | | - const mockConfig = { |
15 | | - loadProvidersConfig: () => null, |
16 | | - } as unknown as Config; |
17 | | - |
18 | | - expect(getPreferredNameModel(mockConfig)).toBeNull(); |
19 | | - } finally { |
20 | | - // Restore env vars |
21 | | - if (savedAnthropicKey) process.env.ANTHROPIC_API_KEY = savedAnthropicKey; |
22 | | - if (savedAnthropicToken) process.env.ANTHROPIC_AUTH_TOKEN = savedAnthropicToken; |
23 | | - } |
| 22 | + it("getPreferredNameModel returns null when no models available", async () => { |
| 23 | + const aiService = createMockAIService([]); |
| 24 | + expect(await getPreferredNameModel(aiService)).toBeNull(); |
24 | 25 | }); |
25 | 26 | |
26 | | - it("getPreferredNameModel prefers anthropic when configured", () => { |
27 | | - const mockConfig = { |
28 | | - loadProvidersConfig: () => ({ |
29 | | - anthropic: { apiKey: "test-key" }, |
30 | | - }), |
31 | | - } as unknown as Config; |
32 | | - |
33 | | - const model = getPreferredNameModel(mockConfig); |
34 | | - expect(model).toContain("anthropic"); |
| 27 | + it("getPreferredNameModel prefers Haiku when available", async () => { |
| 28 | + const aiService = createMockAIService([HAIKU_ID, GPT_MINI_ID]); |
| 29 | + const model = await getPreferredNameModel(aiService); |
| 30 | + expect(model).toBe(HAIKU_ID); |
35 | 31 | }); |
36 | 32 | |
37 | | - it("getPreferredNameModel falls back to openai when anthropic not configured", () => { |
38 | | - // Save and clear env vars |
39 | | - const savedAnthropicKey = process.env.ANTHROPIC_API_KEY; |
40 | | - const savedAnthropicToken = process.env.ANTHROPIC_AUTH_TOKEN; |
41 | | - delete process.env.ANTHROPIC_API_KEY; |
42 | | - delete process.env.ANTHROPIC_AUTH_TOKEN; |
43 | | - |
44 | | - try { |
45 | | - const mockConfig = { |
46 | | - loadProvidersConfig: () => ({ |
47 | | - openai: { apiKey: "test-key" }, |
48 | | - }), |
49 | | - } as unknown as Config; |
50 | | - |
51 | | - const model = getPreferredNameModel(mockConfig); |
52 | | - expect(model).toContain("openai"); |
53 | | - } finally { |
54 | | - // Restore env vars |
55 | | - if (savedAnthropicKey) process.env.ANTHROPIC_API_KEY = savedAnthropicKey; |
56 | | - if (savedAnthropicToken) process.env.ANTHROPIC_AUTH_TOKEN = savedAnthropicToken; |
57 | | - } |
| 33 | + it("getPreferredNameModel falls back to GPT Mini when Haiku unavailable", async () => { |
| 34 | + const aiService = createMockAIService([GPT_MINI_ID]); |
| 35 | + const model = await getPreferredNameModel(aiService); |
| 36 | + expect(model).toBe(GPT_MINI_ID); |
58 | 37 | }); |
59 | 38 | }); |
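
For context, a minimal sketch of what `getPreferredNameModel` would need to do to satisfy these tests, assuming it probes the known models in preference order through `AIService.createModel` and returns the first ID that succeeds. This is inferred from the test expectations, not the actual implementation:

```ts
// Hypothetical sketch inferred from the test cases above, not the actual
// implementation: try known models in preference order and return the first
// ID the AIService can create a model for, else null.
import type { AIService } from "./aiService";
import { getKnownModel } from "@/common/constants/knownModels";

export async function getPreferredNameModel(aiService: AIService): Promise<string | null> {
  // Preference order assumed from the tests: Haiku first, then GPT Mini.
  const candidates = [getKnownModel("HAIKU").id, getKnownModel("GPT_MINI").id];
  for (const id of candidates) {
    const result = await aiService.createModel(id);
    if (result.success) return id;
  }
  return null;
}
```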