From c3dd0eae3018908b6a8870bf1811918d0c0863f3 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Tue, 20 Jan 2026 20:32:37 +1100
Subject: [PATCH 1/4] feat: Introduce fal.ai adapter for image and video
 generation

Add @tanstack/ai-fal package with:

- Image adapter supporting 600+ fal.ai models with full type inference
- Video adapter (experimental) for MiniMax, Luma, Kling, Hunyuan, etc.
- Type-safe modelOptions using fal's EndpointTypeMap for autocomplete
- FalModel, FalModelInput, FalModelOutput utility types
- FalImageProviderOptions/FalVideoProviderOptions that exclude fields
  TanStack AI handles (prompt, size, etc.)
- Size preset mapping utilities for fal.ai format
- Comprehensive test coverage for both adapters

Co-Authored-By: Claude Opus 4.5
---
 .claude/settings.local.json                        |  10 +-
 .gitignore                                         |   4 +-
 packages/typescript/ai-fal/package.json            |  54 ++++
 .../typescript/ai-fal/src/adapters/image.ts        | 212 ++++++++++++++
 .../typescript/ai-fal/src/adapters/video.ts        | 191 +++++++++++++
 .../src/image/image-provider-options.ts            |  64 +++++
 packages/typescript/ai-fal/src/index.ts            |  50 ++++
 packages/typescript/ai-fal/src/model-meta.ts       |  60 ++++
 .../typescript/ai-fal/src/utils/client.ts          |  56 ++++
 packages/typescript/ai-fal/src/utils/index.ts      |   6 +
 .../ai-fal/tests/image-adapter.test.ts             | 258 +++++++++++++++++
 .../ai-fal/tests/video-adapter.test.ts             | 260 ++++++++++++++++++
 packages/typescript/ai-fal/tsconfig.json           |   8 +
 packages/typescript/ai-fal/vite.config.ts          |  36 +++
 pnpm-lock.yaml                                     |  45 ++-
 15 files changed, 1311 insertions(+), 3 deletions(-)
 create mode 100644 packages/typescript/ai-fal/package.json
 create mode 100644 packages/typescript/ai-fal/src/adapters/image.ts
 create mode 100644 packages/typescript/ai-fal/src/adapters/video.ts
 create mode 100644 packages/typescript/ai-fal/src/image/image-provider-options.ts
 create mode 100644 packages/typescript/ai-fal/src/index.ts
 create mode 100644 packages/typescript/ai-fal/src/model-meta.ts
 create mode 100644 packages/typescript/ai-fal/src/utils/client.ts
 create mode 100644 packages/typescript/ai-fal/src/utils/index.ts
 create mode 100644 packages/typescript/ai-fal/tests/image-adapter.test.ts
 create mode 100644 packages/typescript/ai-fal/tests/video-adapter.test.ts
 create mode 100644 packages/typescript/ai-fal/tsconfig.json
 create mode 100644 packages/typescript/ai-fal/vite.config.ts

diff --git a/.claude/settings.local.json b/.claude/settings.local.json
index fd03337a..082c4ee2 100644
--- a/.claude/settings.local.json
+++ b/.claude/settings.local.json
@@ -7,7 +7,15 @@
       "Bash(pnpm test:lib:*)",
       "Bash(pnpm typecheck:*)",
       "Bash(pnpm build:*)",
-      "Bash(find:*)"
+      "Bash(find:*)",
+      "Bash(tree:*)",
+      "WebSearch",
+      "WebFetch(domain:fal.ai)",
+      "mcp__fal__SearchFal"
     ]
+  },
+  "sandbox": {
+    "enabled": true,
+    "autoAllowBashIfSandboxed": true
+  }
 }
diff --git a/.gitignore b/.gitignore
index 15e281c7..5f291851 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,4 +52,6 @@ vite.config.ts.timestamp-*
 test-traces
 **/adapters/output
 .nitro
-.output
\ No newline at end of file
+.output
+
+.claude/settings.local.json
diff --git a/packages/typescript/ai-fal/package.json b/packages/typescript/ai-fal/package.json
new file mode 100644
index 00000000..35556de7
--- /dev/null
+++ b/packages/typescript/ai-fal/package.json
@@ -0,0 +1,54 @@
+{
+  "name": "@tanstack/ai-fal",
+  "version": "0.1.0",
+  "description": "fal.ai adapter for TanStack AI",
+  "author": "",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/TanStack/ai.git",
+    "directory": "packages/typescript/ai-fal"
+  },
+  "type": "module",
+  "module": "./dist/esm/index.js",
+  "types": "./dist/esm/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/esm/index.d.ts",
+      "import": "./dist/esm/index.js"
+    }
+  },
+  "files": [
+    "dist",
+    "src"
+  ],
+  "scripts": {
+    "build": "vite build",
+    "clean": "premove ./build ./dist",
+    "lint:fix": "eslint ./src --fix",
+    "test:build": "publint --strict",
+    "test:eslint": "eslint ./src",
+    "test:lib": "vitest run",
+    "test:lib:dev": "pnpm test:lib --watch",
+    "test:types": "tsc"
+  },
+  "keywords": [
+    "ai",
+    "fal",
+    "tanstack",
+    "adapter",
+    "image-generation",
+    "video-generation"
+  ],
+  "dependencies": {
+    "@fal-ai/client": "^1.8.3",
+    "@tanstack/ai": "workspace:*"
+  },
+  "devDependencies": {
+    "@vitest/coverage-v8": "4.0.14",
+    "vite": "^7.2.7"
+  },
+  "peerDependencies": {
+    "@tanstack/ai": "workspace:*"
+  }
+}
diff --git a/packages/typescript/ai-fal/src/adapters/image.ts b/packages/typescript/ai-fal/src/adapters/image.ts
new file mode 100644
index 00000000..1c4ab609
--- /dev/null
+++ b/packages/typescript/ai-fal/src/adapters/image.ts
@@ -0,0 +1,212 @@
+import { fal } from '@fal-ai/client'
+import { BaseImageAdapter } from '@tanstack/ai/adapters'
+import {
+  configureFalClient,
+  getFalApiKeyFromEnv,
+  generateId as utilGenerateId,
+} from '../utils'
+import type { FalClientConfig } from '../utils'
+import type {
+  GeneratedImage,
+  ImageGenerationOptions,
+  ImageGenerationResult,
+} from '@tanstack/ai'
+import type {
+  FalImageProviderOptions,
+  FalModel,
+  FalModelOutput,
+} from '../model-meta'
+
+export interface FalImageConfig extends Omit<FalClientConfig, 'apiKey'> {
+  apiKey?: string
+}
+
+/**
+ * fal.ai image generation adapter with full type inference.
+ *
+ * Uses fal.ai's comprehensive type system to provide autocomplete
+ * and type safety for all 600+ supported models.
+ *
+ * @example
+ * ```typescript
+ * const adapter = falImage('fal-ai/flux/dev')
+ * const result = await adapter.generateImages({
+ *   model: 'fal-ai/flux/dev',
+ *   prompt: 'a cat',
+ *   modelOptions: {
+ *     num_inference_steps: 28, // Type-safe! Autocomplete works
+ *     guidance_scale: 3.5,
+ *   },
+ * })
+ * ```
+ */
+export class FalImageAdapter<
+  TModel extends FalModel,
+> extends BaseImageAdapter<
+  TModel,
+  FalImageProviderOptions<TModel>,
+  Record<string, FalModelOutput<TModel>>,
+  Record<string, unknown>
+> {
+  readonly kind = 'image' as const
+  readonly name = 'fal' as const
+
+  constructor(apiKey: string, model: TModel, config?: FalImageConfig) {
+    super({}, model)
+    configureFalClient({ apiKey, proxyUrl: config?.proxyUrl })
+  }
+
+  async generateImages(
+    options: ImageGenerationOptions<FalImageProviderOptions<TModel>>,
+  ): Promise<ImageGenerationResult> {
+    const { model, prompt, numberOfImages, size, modelOptions } = options
+
+    // Build the input object - spread modelOptions first, then override with standard options
+    const input: Record<string, unknown> = {
+      ...(modelOptions as Record<string, unknown>),
+      prompt,
+    }
+
+    // Map size to fal.ai format if provided
+    if (size) {
+      input.image_size = this.mapSizeToFalFormat(size)
+    }
+
+    // Add number of images if specified
+    if (numberOfImages) {
+      input.num_images = numberOfImages
+    }
+
+    const result = await fal.subscribe(model, { input })
+
+    return this.transformResponse(
+      model,
+      result as { data: FalModelOutput<TModel>; requestId: string },
+    )
+  }
+
+  protected override generateId(): string {
+    return utilGenerateId(this.name)
+  }
+
+  /**
+   * Maps TanStack AI size format (WIDTHxHEIGHT) to fal.ai format.
+   * fal.ai accepts either preset names or { width, height } objects.
+   */
+  private mapSizeToFalFormat(
+    size: string,
+  ): string | { width: number; height: number } {
+    const SIZE_TO_FAL_PRESET: Record<string, string> = {
+      '1024x1024': 'square_hd',
+      '512x512': 'square',
+      '1024x768': 'landscape_4_3',
+      '768x1024': 'portrait_4_3',
+      '1280x720': 'landscape_16_9',
+      '720x1280': 'portrait_16_9',
+      '1920x1080': 'landscape_16_9',
+      '1080x1920': 'portrait_16_9',
+    }
+
+    // Check if it's a known preset mapping
+    const preset = SIZE_TO_FAL_PRESET[size]
+    if (preset) return preset
+
+    // Try to parse as WIDTHxHEIGHT
+    const match = size.match(/^(\d+)x(\d+)$/)
+    if (match && match[1] && match[2]) {
+      return {
+        width: parseInt(match[1], 10),
+        height: parseInt(match[2], 10),
+      }
+    }
+
+    // Return as-is if it's already a preset name
+    return size
+  }
+
+  private transformResponse(
+    model: string,
+    response: { data: FalModelOutput<TModel>; requestId: string },
+  ): ImageGenerationResult {
+    const images: Array<GeneratedImage> = []
+    const data = response.data as Record<string, unknown>
+
+    // Handle array of images (most models return { images: [...] })
+    if ('images' in data && Array.isArray(data.images)) {
+      for (const img of data.images as Array<{ url: string }>) {
+        images.push(this.parseImage(img))
+      }
+    }
+    // Handle single image response (some models return { image: {...} })
+    else if ('image' in data && data.image && typeof data.image === 'object') {
+      images.push(this.parseImage(data.image as { url: string }))
+    }
+
+    return {
+      id: response.requestId || this.generateId(),
+      model,
+      images,
+    }
+  }
+
+  private parseImage(img: { url: string }): GeneratedImage {
+    const url = img.url
+    // Check if it's a base64 data URL
+    if (url.startsWith('data:')) {
+      const base64Match = url.match(/^data:image\/[^;]+;base64,(.+)$/)
+      if (base64Match) {
+        return {
+          b64Json: base64Match[1],
+          url,
+        }
+      }
+    }
+    return { url }
+  }
+}
+
+/**
+ * Create a fal.ai image adapter with an explicit API key.
+ *
+ * @example
+ * ```typescript
+ * const adapter = createFalImage('fal-ai/flux-pro/v1.1-ultra', process.env.FAL_KEY!)
+ * ```
+ */
+export function createFalImage<TModel extends FalModel>(
+  model: TModel,
+  apiKey: string,
+  config?: FalImageConfig,
+): FalImageAdapter<TModel> {
+  return new FalImageAdapter(apiKey, model, config)
+}
+
+/**
+ * Create a fal.ai image adapter using the FAL_KEY environment variable.
+ *
+ * The model parameter accepts any fal.ai model ID with full type inference.
+ * As you type, you'll get autocomplete for all 600+ supported models.
+ *
+ * @example
+ * ```typescript
+ * // Full autocomplete as you type the model name
+ * const adapter = falImage('fal-ai/flux/dev')
+ *
+ * // modelOptions are type-safe based on the model
+ * const result = await adapter.generateImages({
+ *   model: 'fal-ai/flux/dev',
+ *   prompt: 'a cat',
+ *   modelOptions: {
+ *     num_inference_steps: 28,
+ *     guidance_scale: 3.5,
+ *     seed: 12345,
+ *   },
+ * })
+ * ```
+ */
+export function falImage<TModel extends FalModel>(
+  model: TModel,
+  config?: FalImageConfig,
+): FalImageAdapter<TModel> {
+  const apiKey = getFalApiKeyFromEnv()
+  return createFalImage(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-fal/src/adapters/video.ts b/packages/typescript/ai-fal/src/adapters/video.ts
new file mode 100644
index 00000000..5b727723
--- /dev/null
+++ b/packages/typescript/ai-fal/src/adapters/video.ts
@@ -0,0 +1,191 @@
+import { fal } from '@fal-ai/client'
+import { BaseVideoAdapter } from '@tanstack/ai/adapters'
+import {
+  configureFalClient,
+  getFalApiKeyFromEnv,
+  generateId as utilGenerateId,
+} from '../utils'
+import type { FalClientConfig } from '../utils'
+import type {
+  VideoGenerationOptions,
+  VideoJobResult,
+  VideoStatusResult,
+  VideoUrlResult,
+} from '@tanstack/ai'
+import type { FalModel, FalVideoProviderOptions } from '../model-meta'
+
+export interface FalVideoConfig extends Omit<FalClientConfig, 'apiKey'> {
+  apiKey?: string
+}
+
+type FalQueueStatus = 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+
+interface FalStatusResponse {
+  status: FalQueueStatus
+  queue_position?: number
+  logs?: Array<{ message: string }>
+}
+
+interface FalVideoResultData {
+  video?: { url: string }
+  video_url?: string
+}
+
+/**
+ * Maps fal.ai queue status to TanStack AI video status.
+ */
+function mapFalStatusToVideoStatus(
+  falStatus: FalQueueStatus,
+): VideoStatusResult['status'] {
+  switch (falStatus) {
+    case 'IN_QUEUE':
+      return 'pending'
+    case 'IN_PROGRESS':
+      return 'processing'
+    case 'COMPLETED':
+      return 'completed'
+    default:
+      return 'processing'
+  }
+}
+
+/**
+ * fal.ai video generation adapter.
+ * Supports MiniMax, Luma, Kling, Hunyuan, and other fal.ai video models.
+ *
+ * Uses fal.ai's comprehensive type system to provide autocomplete
+ * and type safety for all supported video models.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ */
+export class FalVideoAdapter<
+  TModel extends FalModel,
+> extends BaseVideoAdapter<TModel, FalVideoProviderOptions<TModel>> {
+  readonly kind = 'video' as const
+  readonly name = 'fal' as const
+
+  constructor(apiKey: string, model: TModel, config?: FalVideoConfig) {
+    super({}, model)
+    configureFalClient({ apiKey, proxyUrl: config?.proxyUrl })
+  }
+
+  async createVideoJob(
+    options: VideoGenerationOptions<FalVideoProviderOptions<TModel>>,
+  ): Promise<VideoJobResult> {
+    const { model, prompt, size, duration, modelOptions } = options
+
+    // Build the input object for fal.ai
+    const input: Record<string, unknown> = {
+      prompt,
+    }
+
+    // Add duration if specified
+    if (duration) {
+      input.duration = duration
+    }
+
+    // Parse size to aspect ratio if provided
+    if (size) {
+      const aspectRatio = this.sizeToAspectRatio(size)
+      if (aspectRatio) {
+        input.aspect_ratio = aspectRatio
+      }
+    }
+
+    // Merge model-specific options
+    if (modelOptions) {
+      Object.assign(input, modelOptions)
+    }
+
+    // Submit to queue and get request ID
+    const { request_id } = await fal.queue.submit(model, {
+      input,
+    })
+
+    return {
+      jobId: request_id,
+      model,
+    }
+  }
+
+  async getVideoStatus(jobId: string): Promise<VideoStatusResult> {
+    const statusResponse = (await fal.queue.status(this.model, {
+      requestId: jobId,
+      logs: true,
+    })) as FalStatusResponse
+
+    return {
+      jobId,
+      status: mapFalStatusToVideoStatus(statusResponse.status),
+      progress: statusResponse.queue_position
+        ? Math.max(0, 100 - statusResponse.queue_position * 10)
+        : undefined,
+    }
+  }
+
+  async getVideoUrl(jobId: string): Promise<VideoUrlResult> {
+    const result = await fal.queue.result(this.model, {
+      requestId: jobId,
+    })
+
+    const data = result.data as FalVideoResultData
+
+    // Different models return video URL in different formats
+    const url = data.video?.url || data.video_url
+    if (!url) {
+      throw new Error('Video URL not found in response')
+    }
+
+    return {
+      jobId,
+      url,
+    }
+  }
+
+  protected override generateId(): string {
+    return utilGenerateId(this.name)
+  }
+
+  /**
+   * Convert WIDTHxHEIGHT size format to aspect ratio.
+   */
+  private sizeToAspectRatio(size: string): string | undefined {
+    const match = size.match(/^(\d+)x(\d+)$/)
+    if (!match || !match[1] || !match[2]) return undefined
+
+    const width = parseInt(match[1], 10)
+    const height = parseInt(match[2], 10)
+
+    // Calculate GCD for simplest ratio
+    const gcd = (a: number, b: number): number => (b === 0 ? a : gcd(b, a % b))
+    const divisor = gcd(width, height)
+
+    return `${width / divisor}:${height / divisor}`
+  }
+}
+
+/**
+ * Create a fal.ai video adapter with an explicit API key.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ */
+export function createFalVideo<TModel extends FalModel>(
+  model: TModel,
+  apiKey: string,
+  config?: FalVideoConfig,
+): FalVideoAdapter<TModel> {
+  return new FalVideoAdapter(apiKey, model, config)
+}
+
+/**
+ * Create a fal.ai video adapter using the FAL_KEY environment variable.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ */
+export function falVideo<TModel extends FalModel>(
+  model: TModel,
+  config?: FalVideoConfig,
+): FalVideoAdapter<TModel> {
+  const apiKey = getFalApiKeyFromEnv()
+  return createFalVideo(model, apiKey, config)
+}
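
Editor's note (not part of the patch): unlike image.ts, video.ts ships without a usage example, so here is a minimal sketch of how the three queue methods compose. It assumes FAL_KEY is set in the environment; the polling interval, the completion check, and the `image_url` value are illustrative assumptions, not adapter API.

```typescript
import { falVideo } from '@tanstack/ai-fal'

const adapter = falVideo('fal-ai/minimax-video/image-to-video')

// Submit the job; fal.ai queues it and returns a request ID.
const { jobId } = await adapter.createVideoJob({
  model: 'fal-ai/minimax-video/image-to-video',
  prompt: 'A cat walking in the garden',
  duration: 10,
  modelOptions: { image_url: 'https://example.com/image.jpg' }, // hypothetical source image
})

// Poll until the queue reports COMPLETED (mapped to 'completed').
let status = await adapter.getVideoStatus(jobId)
while (status.status !== 'completed') {
  await new Promise((resolve) => setTimeout(resolve, 2_000)) // illustrative interval
  status = await adapter.getVideoStatus(jobId)
}

// Fetch the final asset URL.
const { url } = await adapter.getVideoUrl(jobId)
```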
diff --git a/packages/typescript/ai-fal/src/image/image-provider-options.ts b/packages/typescript/ai-fal/src/image/image-provider-options.ts
new file mode 100644
index 00000000..de1c2eac
--- /dev/null
+++ b/packages/typescript/ai-fal/src/image/image-provider-options.ts
@@ -0,0 +1,64 @@
+/**
+ * fal.ai image size presets supported by most models.
+ * These are semantic names that fal.ai accepts directly.
+ */
+export type FalImageSizePreset =
+  | 'square_hd'
+  | 'square'
+  | 'landscape_4_3'
+  | 'landscape_16_9'
+  | 'portrait_4_3'
+  | 'portrait_16_9'
+
+/**
+ * Mapping of standard TanStack AI sizes to fal.ai size presets.
+ */
+const SIZE_TO_FAL_PRESET: Record<string, FalImageSizePreset> = {
+  '1024x1024': 'square_hd',
+  '512x512': 'square',
+  '1024x768': 'landscape_4_3',
+  '768x1024': 'portrait_4_3',
+  '1280x720': 'landscape_16_9',
+  '720x1280': 'portrait_16_9',
+  '1920x1080': 'landscape_16_9',
+  '1080x1920': 'portrait_16_9',
+}
+
+/**
+ * Maps TanStack AI size format (WIDTHxHEIGHT) to fal.ai format.
+ * fal.ai accepts either preset names or { width, height } objects.
+ */
+export function mapSizeToFalFormat(
+  size: string | undefined,
+): FalImageSizePreset | { width: number; height: number } | undefined {
+  if (!size) return undefined
+
+  // Check if it's a known preset mapping
+  const preset = SIZE_TO_FAL_PRESET[size]
+  if (preset) return preset
+
+  // Try to parse as WIDTHxHEIGHT
+  const match = size.match(/^(\d+)x(\d+)$/)
+  if (match && match[1] && match[2]) {
+    return {
+      width: parseInt(match[1], 10),
+      height: parseInt(match[2], 10),
+    }
+  }
+
+  // If it's already a preset name, return as-is
+  if (
+    [
+      'square_hd',
+      'square',
+      'landscape_4_3',
+      'landscape_16_9',
+      'portrait_4_3',
+      'portrait_16_9',
+    ].includes(size)
+  ) {
+    return size as FalImageSizePreset
+  }
+
+  return undefined
+}
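
Editor's note (not part of the patch): the exported helper has four possible outcomes, all exercised by the tests later in this series. For quick reference:

```typescript
import { mapSizeToFalFormat } from '@tanstack/ai-fal'

mapSizeToFalFormat('1024x768')  // 'landscape_4_3' (known preset mapping)
mapSizeToFalFormat('800x600')   // { width: 800, height: 600 } (parsed WIDTHxHEIGHT)
mapSizeToFalFormat('square_hd') // 'square_hd' (already a fal.ai preset name)
mapSizeToFalFormat('invalid')   // undefined (unrecognized input)
```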
diff --git a/packages/typescript/ai-fal/src/index.ts b/packages/typescript/ai-fal/src/index.ts
new file mode 100644
index 00000000..2fa6061a
--- /dev/null
+++ b/packages/typescript/ai-fal/src/index.ts
@@ -0,0 +1,50 @@
+// ============================================================================
+// Image Adapter
+// ============================================================================
+
+export {
+  FalImageAdapter,
+  createFalImage,
+  falImage,
+  type FalImageConfig,
+} from './adapters/image'
+
+export {
+  mapSizeToFalFormat,
+  type FalImageSizePreset,
+} from './image/image-provider-options'
+
+// ============================================================================
+// Video Adapter (Experimental)
+// ============================================================================
+
+export {
+  FalVideoAdapter,
+  createFalVideo,
+  falVideo,
+  type FalVideoConfig,
+} from './adapters/video'
+
+// ============================================================================
+// Model Types (from fal.ai's type system)
+// ============================================================================
+
+export {
+  type EndpointTypeMap,
+  type FalModel,
+  type FalModelInput,
+  type FalModelOutput,
+  type FalImageProviderOptions,
+  type FalVideoProviderOptions,
+} from './model-meta'
+
+// ============================================================================
+// Utils
+// ============================================================================
+
+export {
+  getFalApiKeyFromEnv,
+  configureFalClient,
+  generateId,
+  type FalClientConfig,
+} from './utils'
diff --git a/packages/typescript/ai-fal/src/model-meta.ts b/packages/typescript/ai-fal/src/model-meta.ts
new file mode 100644
index 00000000..dc267e2f
--- /dev/null
+++ b/packages/typescript/ai-fal/src/model-meta.ts
@@ -0,0 +1,60 @@
+/**
+ * Re-export fal.ai's comprehensive type system for full model support.
+ * The fal.ai SDK provides types for 600+ models through EndpointTypeMap.
+ * These types give you full autocomplete and type safety for any model.
+ */
+import type { EndpointTypeMap } from '@fal-ai/client/endpoints'
+
+export type { EndpointTypeMap } from '@fal-ai/client/endpoints'
+
+/**
+ * All known fal.ai model IDs with autocomplete support.
+ * Also accepts any string for custom/new models.
+ */
+export type FalModel = keyof EndpointTypeMap | (string & {})
+
+/**
+ * Utility type to extract the input type for a specific fal model.
+ *
+ * @example
+ * type FluxInput = FalModelInput<'fal-ai/flux/dev'>
+ * // { prompt: string; num_inference_steps?: number; ... }
+ */
+export type FalModelInput<TModel extends FalModel> =
+  TModel extends keyof EndpointTypeMap
+    ? EndpointTypeMap[TModel]['input']
+    : Record<string, unknown>
+
+/**
+ * Utility type to extract the output type for a specific fal model.
+ *
+ * @example
+ * type FluxOutput = FalModelOutput<'fal-ai/flux/dev'>
+ * // { images: Array<Image>; seed: number; ... }
+ */
+export type FalModelOutput<TModel extends FalModel> =
+  TModel extends keyof EndpointTypeMap
+    ? EndpointTypeMap[TModel]['output']
+    : unknown
+
+/**
+ * Provider options for image generation, excluding fields TanStack AI handles.
+ * Use this for the `modelOptions` parameter in image generation.
+ *
+ * @example
+ * type FluxOptions = FalImageProviderOptions<'fal-ai/flux/dev'>
+ * // { num_inference_steps?: number; guidance_scale?: number; seed?: number; ... }
+ */
+export type FalImageProviderOptions<TModel extends FalModel> = Omit<
+  FalModelInput<TModel>,
+  'prompt' | 'image_size' | 'num_images'
+>
+
+/**
+ * Provider options for video generation, excluding fields TanStack AI handles.
+ * Use this for the `modelOptions` parameter in video generation.
+ */
+export type FalVideoProviderOptions<TModel extends FalModel> = Omit<
+  FalModelInput<TModel>,
+  'prompt' | 'aspect_ratio' | 'duration'
+>
diff --git a/packages/typescript/ai-fal/src/utils/client.ts b/packages/typescript/ai-fal/src/utils/client.ts
new file mode 100644
index 00000000..96ba8c73
--- /dev/null
+++ b/packages/typescript/ai-fal/src/utils/client.ts
@@ -0,0 +1,56 @@
+import { fal } from '@fal-ai/client'
+
+export interface FalClientConfig {
+  apiKey: string
+  proxyUrl?: string
+}
+
+interface EnvObject {
+  FAL_KEY?: string
+}
+
+interface WindowWithEnv {
+  env?: EnvObject
+}
+
+function getEnvironment(): EnvObject | undefined {
+  if (typeof globalThis !== 'undefined') {
+    const win = (globalThis as { window?: WindowWithEnv }).window
+    if (win?.env) {
+      return win.env
+    }
+  }
+  if (typeof process !== 'undefined') {
+    return process.env as EnvObject
+  }
+  return undefined
+}
+
+export function getFalApiKeyFromEnv(): string {
+  const env = getEnvironment()
+  const key = env?.FAL_KEY
+
+  if (!key) {
+    throw new Error(
+      'FAL_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.',
+    )
+  }
+
+  return key
+}
+
+export function configureFalClient(config: FalClientConfig): void {
+  if (config.proxyUrl) {
+    fal.config({
+      proxyUrl: config.proxyUrl,
+    })
+  } else {
+    fal.config({
+      credentials: config.apiKey,
+    })
+  }
+}
+
+export function generateId(prefix: string): string {
+  return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}`
+}
diff --git a/packages/typescript/ai-fal/src/utils/index.ts b/packages/typescript/ai-fal/src/utils/index.ts
new file mode 100644
index 00000000..acd4bfb6
--- /dev/null
+++ b/packages/typescript/ai-fal/src/utils/index.ts
@@ -0,0 +1,6 @@
+export {
+  getFalApiKeyFromEnv,
+  configureFalClient,
+  generateId,
+  type FalClientConfig,
+} from './client'
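
Editor's note (not part of the patch): configureFalClient treats proxyUrl as taking precedence over credentials, so behind a proxy the API key is never handed to the fal client. A sketch of the two modes; the proxy route is a hypothetical endpoint you would implement yourself:

```typescript
import { createFalImage } from '@tanstack/ai-fal'

// Direct mode: the key is passed to the fal client as credentials.
const direct = createFalImage('fal-ai/flux/dev', process.env.FAL_KEY!)

// Proxy mode: requests are routed through your endpoint; the key argument
// is ignored by configureFalClient, so real credentials stay server-side.
const viaProxy = createFalImage('fal-ai/flux/dev', 'unused-behind-proxy', {
  proxyUrl: '/api/fal/proxy', // hypothetical route
})
```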
diff --git a/packages/typescript/ai-fal/tests/image-adapter.test.ts b/packages/typescript/ai-fal/tests/image-adapter.test.ts
new file mode 100644
index 00000000..20f5135c
--- /dev/null
+++ b/packages/typescript/ai-fal/tests/image-adapter.test.ts
@@ -0,0 +1,258 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import { createFalImage, mapSizeToFalFormat } from '../src'
+
+// Declare mocks at module level
+let mockSubscribe: any
+let mockConfig: any
+
+// Mock the fal.ai client
+vi.mock('@fal-ai/client', () => {
+  return {
+    fal: {
+      subscribe: (...args: Array<any>) => mockSubscribe(...args),
+      config: (...args: Array<any>) => mockConfig(...args),
+    },
+  }
+})
+
+const createAdapter = () => createFalImage('fal-ai/flux/dev', 'test-key')
+
+function createMockImageResponse(images: Array<{ url: string }>) {
+  return {
+    data: {
+      images,
+    },
+    requestId: 'req-123',
+  }
+}
+
+describe('Fal Image Adapter', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+    mockSubscribe = vi.fn()
+    mockConfig = vi.fn()
+  })
+
+  it('generates images with correct API call', async () => {
+    const mockResponse = createMockImageResponse([
+      { url: 'https://fal.media/files/image1.png' },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    const result = await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'A futuristic city at sunset',
+    })
+
+    expect(mockSubscribe).toHaveBeenCalledTimes(1)
+
+    const [model, options] = mockSubscribe.mock.calls[0]!
+    expect(model).toBe('fal-ai/flux/dev')
+    expect(options).toMatchObject({
+      input: {
+        prompt: 'A futuristic city at sunset',
+      },
+    })
+
+    expect(result.images).toHaveLength(1)
+    expect(result.images[0]!.url).toBe('https://fal.media/files/image1.png')
+    expect(result.model).toBe('fal-ai/flux/dev')
+  })
+
+  it('generates multiple images', async () => {
+    const mockResponse = createMockImageResponse([
+      { url: 'https://fal.media/files/image1.png' },
+      { url: 'https://fal.media/files/image2.png' },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    const result = await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'A cute robot mascot',
+      numberOfImages: 2,
+    })
+
+    const [, options] = mockSubscribe.mock.calls[0]!
+    expect(options.input).toMatchObject({
+      num_images: 2,
+    })
+
+    expect(result.images).toHaveLength(2)
+    expect(result.images[0]!.url).toBe('https://fal.media/files/image1.png')
+    expect(result.images[1]!.url).toBe('https://fal.media/files/image2.png')
+  })
+
+  it('handles base64 image responses', async () => {
+    const base64Data =
+      'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=='
+    const mockResponse = createMockImageResponse([
+      { url: `data:image/png;base64,${base64Data}` },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    const result = await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'A simple test image',
+    })
+
+    expect(result.images).toHaveLength(1)
+    expect(result.images[0]!.b64Json).toBe(base64Data)
+    expect(result.images[0]!.url).toBe(`data:image/png;base64,${base64Data}`)
+  })
+
+  it('converts size to fal format preset', async () => {
+    const mockResponse = createMockImageResponse([
+      { url: 'https://fal.media/files/image.png' },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'A wide landscape',
+      size: '1024x768', // Should map to landscape_4_3
+    })
+
+    const [, options] = mockSubscribe.mock.calls[0]!
+    expect(options.input).toMatchObject({
+      image_size: 'landscape_4_3',
+    })
+  })
+
+  it('converts custom size to width/height object', async () => {
+    const mockResponse = createMockImageResponse([
+      { url: 'https://fal.media/files/image.png' },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'A custom size image',
+      size: '800x600',
+    })
+
+    const [, options] = mockSubscribe.mock.calls[0]!
+    expect(options.input).toMatchObject({
+      image_size: { width: 800, height: 600 },
+    })
+  })
+
+  it('passes model options correctly', async () => {
+    const mockResponse = createMockImageResponse([
+      { url: 'https://fal.media/files/image.png' },
+    ])
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'Test',
+      modelOptions: {
+        num_inference_steps: 28,
+        guidance_scale: 3.5,
+        seed: 12345,
+      },
+    })
+
+    const [, options] = mockSubscribe.mock.calls[0]!
+    expect(options.input).toMatchObject({
+      num_inference_steps: 28,
+      guidance_scale: 3.5,
+      seed: 12345,
+    })
+  })
+
+  it('handles single image response format', async () => {
+    const mockResponse = {
+      data: {
+        image: { url: 'https://fal.media/files/single.png' },
+      },
+      requestId: 'req-456',
+    }
+
+    mockSubscribe.mockResolvedValueOnce(mockResponse)
+
+    const adapter = createAdapter()
+
+    const result = await adapter.generateImages({
+      model: 'fal-ai/flux/dev',
+      prompt: 'Single image test',
+    })
+
+    expect(result.images).toHaveLength(1)
+    expect(result.images[0]!.url).toBe('https://fal.media/files/single.png')
+  })
+
+  it('throws error on SDK error', async () => {
+    mockSubscribe.mockRejectedValueOnce(new Error('Model not found'))
+
+    const adapter = createAdapter()
+
+    await expect(
+      adapter.generateImages({
+        model: 'invalid/model',
+        prompt: 'Test prompt',
+      }),
+    ).rejects.toThrow('Model not found')
+  })
+
+  it('configures client with API key', () => {
+    createFalImage('fal-ai/flux/dev', 'my-api-key')
+
+    expect(mockConfig).toHaveBeenCalledWith({
+      credentials: 'my-api-key',
+    })
+  })
+
+  it('configures client with proxy URL when provided', () => {
+    createFalImage('fal-ai/flux/dev', 'my-api-key', {
+      proxyUrl: '/api/fal/proxy',
+    })
+
+    expect(mockConfig).toHaveBeenCalledWith({
+      proxyUrl: '/api/fal/proxy',
+    })
+  })
+})
+
+describe('mapSizeToFalFormat', () => {
+  it('maps known sizes to presets', () => {
+    expect(mapSizeToFalFormat('1024x1024')).toBe('square_hd')
+    expect(mapSizeToFalFormat('512x512')).toBe('square')
+    expect(mapSizeToFalFormat('1024x768')).toBe('landscape_4_3')
+    expect(mapSizeToFalFormat('768x1024')).toBe('portrait_4_3')
+    expect(mapSizeToFalFormat('1280x720')).toBe('landscape_16_9')
+    expect(mapSizeToFalFormat('720x1280')).toBe('portrait_16_9')
+  })
+
+  it('parses custom WIDTHxHEIGHT format', () => {
+    expect(mapSizeToFalFormat('800x600')).toEqual({ width: 800, height: 600 })
+    expect(mapSizeToFalFormat('1920x1200')).toEqual({
+      width: 1920,
+      height: 1200,
+    })
+  })
+
+  it('returns preset names as-is', () => {
+    expect(mapSizeToFalFormat('square_hd')).toBe('square_hd')
+    expect(mapSizeToFalFormat('landscape_4_3')).toBe('landscape_4_3')
+  })
+
+  it('returns undefined for invalid input', () => {
+    expect(mapSizeToFalFormat(undefined)).toBeUndefined()
+    expect(mapSizeToFalFormat('invalid')).toBeUndefined()
+  })
+})
diff --git a/packages/typescript/ai-fal/tests/video-adapter.test.ts b/packages/typescript/ai-fal/tests/video-adapter.test.ts
new file mode 100644
index 00000000..acfa363a
--- /dev/null
+++ b/packages/typescript/ai-fal/tests/video-adapter.test.ts
@@ -0,0 +1,260 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import { createFalVideo } from '../src'
+
+// Declare mocks at module level
+let mockQueueSubmit: any
+let mockQueueStatus: any
+let mockQueueResult: any
+let mockConfig: any
+
+// Mock the fal.ai client
+vi.mock('@fal-ai/client', () => {
+  return {
+    fal: {
+      queue: {
+        submit: (...args: Array<any>) => mockQueueSubmit(...args),
+        status: (...args: Array<any>) => mockQueueStatus(...args),
+        result: (...args: Array<any>) => mockQueueResult(...args),
+      },
+      config: (...args: Array<any>) => mockConfig(...args),
+    },
+  }
+})
+
+const createAdapter = () =>
+  createFalVideo('fal-ai/minimax-video/image-to-video', 'test-key')
+
+describe('Fal Video Adapter', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+    mockQueueSubmit = vi.fn()
+    mockQueueStatus = vi.fn()
+    mockQueueResult = vi.fn()
+    mockConfig = vi.fn()
+  })
+
+  describe('createVideoJob', () => {
+    it('submits video generation job to queue', async () => {
+      mockQueueSubmit.mockResolvedValueOnce({
+        request_id: 'job-123',
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.createVideoJob({
+        model: 'fal-ai/minimax-video/image-to-video',
+        prompt: 'A cat walking in the garden',
+      })
+
+      expect(mockQueueSubmit).toHaveBeenCalledTimes(1)
+
+      const [model, options] = mockQueueSubmit.mock.calls[0]!
+      expect(model).toBe('fal-ai/minimax-video/image-to-video')
+      expect(options).toMatchObject({
+        input: {
+          prompt: 'A cat walking in the garden',
+        },
+      })
+
+      expect(result.jobId).toBe('job-123')
+      expect(result.model).toBe('fal-ai/minimax-video/image-to-video')
+    })
+
+    it('includes image URL for image-to-video models', async () => {
+      mockQueueSubmit.mockResolvedValueOnce({
+        request_id: 'job-456',
+      })
+
+      const adapter = createAdapter()
+
+      await adapter.createVideoJob({
+        model: 'fal-ai/minimax-video/image-to-video',
+        prompt: 'A stylish woman walks down a Tokyo street',
+        modelOptions: {
+          image_url: 'https://example.com/image.jpg',
+        },
+      })
+
+      const [, options] = mockQueueSubmit.mock.calls[0]!
+      expect(options.input).toMatchObject({
+        image_url: 'https://example.com/image.jpg',
+      })
+    })
+
+    it('includes duration option', async () => {
+      mockQueueSubmit.mockResolvedValueOnce({
+        request_id: 'job-789',
+      })
+
+      const adapter = createAdapter()
+
+      await adapter.createVideoJob({
+        model: 'fal-ai/minimax-video/image-to-video',
+        prompt: 'A time lapse of a sunset',
+        duration: 10,
+      })
+
+      const [, options] = mockQueueSubmit.mock.calls[0]!
+      expect(options.input).toMatchObject({
+        duration: 10,
+      })
+    })
+
+    it('converts size to aspect ratio', async () => {
+      mockQueueSubmit.mockResolvedValueOnce({
+        request_id: 'job-ar',
+      })
+
+      const adapter = createAdapter()
+
+      await adapter.createVideoJob({
+        model: 'fal-ai/minimax-video/image-to-video',
+        prompt: 'A wide landscape video',
+        size: '1920x1080', // 16:9
+      })
+
+      const [, options] = mockQueueSubmit.mock.calls[0]!
+      expect(options.input).toMatchObject({
+        aspect_ratio: '16:9',
+      })
+    })
+
+    it('passes model-specific options', async () => {
+      mockQueueSubmit.mockResolvedValueOnce({
+        request_id: 'job-opts',
+      })
+
+      const adapter = createAdapter()
+
+      await adapter.createVideoJob({
+        model: 'fal-ai/minimax-video/image-to-video',
+        prompt: 'Test video',
+        modelOptions: {
+          with_audio: true,
+          seed: 12345,
+        },
+      })
+
+      const [, options] = mockQueueSubmit.mock.calls[0]!
+      expect(options.input).toMatchObject({
+        with_audio: true,
+        seed: 12345,
+      })
+    })
+  })
+
+  describe('getVideoStatus', () => {
+    it('returns pending status for queued jobs', async () => {
+      mockQueueStatus.mockResolvedValueOnce({
+        status: 'IN_QUEUE',
+        queue_position: 5,
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.getVideoStatus('job-123')
+
+      expect(mockQueueStatus).toHaveBeenCalledWith(
+        'fal-ai/minimax-video/image-to-video',
+        { requestId: 'job-123', logs: true },
+      )
+
+      expect(result.jobId).toBe('job-123')
+      expect(result.status).toBe('pending')
+      expect(result.progress).toBe(50) // 100 - 5 * 10 = 50
+    })
+
+    it('returns processing status for in-progress jobs', async () => {
+      mockQueueStatus.mockResolvedValueOnce({
+        status: 'IN_PROGRESS',
+        logs: [{ message: 'Generating frames...' }],
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.getVideoStatus('job-456')
+
+      expect(result.status).toBe('processing')
+    })
+
+    it('returns completed status for finished jobs', async () => {
+      mockQueueStatus.mockResolvedValueOnce({
+        status: 'COMPLETED',
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.getVideoStatus('job-789')
+
+      expect(result.status).toBe('completed')
+    })
+  })
+
+  describe('getVideoUrl', () => {
+    it('returns video URL from video object', async () => {
+      mockQueueResult.mockResolvedValueOnce({
+        data: {
+          video: { url: 'https://fal.media/files/video.mp4' },
+        },
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.getVideoUrl('job-123')
+
+      expect(mockQueueResult).toHaveBeenCalledWith(
+        'fal-ai/minimax-video/image-to-video',
+        { requestId: 'job-123' },
+      )
+
+      expect(result.jobId).toBe('job-123')
+      expect(result.url).toBe('https://fal.media/files/video.mp4')
+    })
+
+    it('returns video URL from video_url field', async () => {
+      mockQueueResult.mockResolvedValueOnce({
+        data: {
+          video_url: 'https://fal.media/files/video2.mp4',
+        },
+      })
+
+      const adapter = createAdapter()
+
+      const result = await adapter.getVideoUrl('job-456')
+
+      expect(result.url).toBe('https://fal.media/files/video2.mp4')
+    })
+
+    it('throws error when video URL is not found', async () => {
+      mockQueueResult.mockResolvedValueOnce({
+        data: {},
+      })
+
+      const adapter = createAdapter()
+
+      await expect(adapter.getVideoUrl('job-789')).rejects.toThrow(
+        'Video URL not found in response',
+      )
+    })
+  })
+
+  describe('client configuration', () => {
+    it('configures client with API key', () => {
+      createFalVideo('fal-ai/minimax-video/image-to-video', 'my-api-key')
+
+      expect(mockConfig).toHaveBeenCalledWith({
+        credentials: 'my-api-key',
+      })
+    })
+
+    it('configures client with proxy URL when provided', () => {
+      createFalVideo('fal-ai/minimax-video/image-to-video', 'my-api-key', {
+        proxyUrl: '/api/fal/proxy',
+      })
+
+      expect(mockConfig).toHaveBeenCalledWith({
+        proxyUrl: '/api/fal/proxy',
+      })
+    })
+  })
+})
diff --git a/packages/typescript/ai-fal/tsconfig.json b/packages/typescript/ai-fal/tsconfig.json
new file mode 100644
index 00000000..2d3b235e
--- /dev/null
+++ b/packages/typescript/ai-fal/tsconfig.json
@@ -0,0 +1,8 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "dist"
+  },
+  "include": ["src/**/*.ts", "src/**/*.tsx", "./tests/**/*.ts"],
+  "exclude": ["node_modules", "dist", "**/*.config.ts"]
+}
diff --git a/packages/typescript/ai-fal/vite.config.ts b/packages/typescript/ai-fal/vite.config.ts
new file mode 100644
index 00000000..77bcc2e6
--- /dev/null
+++ b/packages/typescript/ai-fal/vite.config.ts
@@ -0,0 +1,36 @@
+import { defineConfig, mergeConfig } from 'vitest/config'
+import { tanstackViteConfig } from '@tanstack/vite-config'
+import packageJson from './package.json'
+
+const config = defineConfig({
+  test: {
+    name: packageJson.name,
+    dir: './',
+    watch: false,
+    globals: true,
+    environment: 'node',
+    include: ['tests/**/*.test.ts'],
+    coverage: {
+      provider: 'v8',
+      reporter: ['text', 'json', 'html', 'lcov'],
+      exclude: [
+        'node_modules/',
+        'dist/',
+        'tests/',
+        '**/*.test.ts',
+        '**/*.config.ts',
+        '**/types.ts',
+      ],
+      include: ['src/**/*.ts'],
+    },
+  },
+})
+
+export default mergeConfig(
+  config,
+  tanstackViteConfig({
+    entry: ['./src/index.ts'],
+    srcDir: './src',
+    cjs: false,
+  }),
+)
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index bac95717..0c739a60 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -685,6 +685,22 @@ importers:
         specifier: ^2.11.10
         version: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
 
+  packages/typescript/ai-fal:
+    dependencies:
+      '@fal-ai/client':
+        specifier: ^1.8.3
+        version: 1.8.3
+      '@tanstack/ai':
+        specifier: workspace:*
+        version: link:../ai
+    devDependencies:
+      '@vitest/coverage-v8':
+        specifier: 4.0.14
+        version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+      vite:
+        specifier: ^7.2.7
+        version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
+
   packages/typescript/ai-gemini:
     dependencies:
       '@google/genai':
@@ -2275,6 +2291,10 @@ packages:
     resolution: {integrity: sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg==}
     engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'}
 
+  '@fal-ai/client@1.8.3':
+    resolution: {integrity: sha512-NL6rrWVJiz6pI5m30qRMKwaXLz1r5mNuSrK3hmGtF0gLwtSQ1elhXPkSI75kbp9eMwJBdzBkOsvbwnoSMG3I5A==}
+    engines: {node: '>=22.0.0'}
+
   '@gerrit0/mini-shiki@3.19.0':
     resolution: {integrity: sha512-ZSlWfLvr8Nl0T4iA3FF/8VH8HivYF82xQts2DY0tJxZd4wtXJ8AA0nmdW9lmO4hlrh3f9xNwEPtOgqETPqKwDA==}
 
@@ -2386,6 +2406,10 @@ packages:
   '@microsoft/tsdoc@0.15.1':
     resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==}
 
+  '@msgpack/msgpack@3.1.3':
+    resolution: {integrity: sha512-47XIizs9XZXvuJgoaJUIE2lFoID8ugvc0jzSHP+Ptfk8nTbnR8g788wv48N03Kx0UkAv559HWRQ3yzOgzlRNUA==}
+    engines: {node: '>= 18'}
+
   '@napi-rs/wasm-runtime@0.2.12':
     resolution: {integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==}
 
@@ -5310,6 +5334,10 @@ packages:
     resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==}
     engines: {node: '>=0.8.x'}
 
+  eventsource-parser@1.1.2:
+    resolution: {integrity: sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA==}
+    engines: {node: '>=14.18'}
+
   execa@8.0.1:
     resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==}
     engines: {node: '>=16.17'}
 
@@ -7178,6 +7206,9 @@ packages:
     resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==}
     hasBin: true
 
+  robot3@0.4.1:
+    resolution: {integrity: sha512-hzjy826lrxzx8eRgv80idkf8ua1JAepRc9Efdtj03N3KNJuznQCPlyCJ7gnUmDFwZCLQjxy567mQVKmdv2BsXQ==}
+
   rolldown-plugin-dts@0.18.3:
     resolution: {integrity: sha512-rd1LZ0Awwfyn89UndUF/HoFF4oH9a5j+2ZeuKSJYM80vmeN/p0gslYMnHTQHBEXPhUlvAlqGA3tVgXB/1qFNDg==}
     engines: {node: '>=20.19.0'}
 
@@ -9291,6 +9322,12 @@ snapshots:
 
   '@faker-js/faker@10.1.0': {}
 
+  '@fal-ai/client@1.8.3':
+    dependencies:
+      '@msgpack/msgpack': 3.1.3
+      eventsource-parser: 1.1.2
+      robot3: 0.4.1
+
   '@gerrit0/mini-shiki@3.19.0':
     dependencies:
       '@shikijs/engine-oniguruma': 3.20.0
@@ -9443,6 +9480,8 @@ snapshots:
 
   '@microsoft/tsdoc@0.15.1': {}
 
+  '@msgpack/msgpack@3.1.3': {}
+
   '@napi-rs/wasm-runtime@0.2.12':
     dependencies:
       '@emnapi/core': 1.7.1
@@ -13087,6 +13126,8 @@ snapshots:
 
   events@3.3.0: {}
 
+  eventsource-parser@1.1.2: {}
+
   execa@8.0.1:
     dependencies:
       cross-spawn: 7.0.6
@@ -15396,6 +15437,8 @@ snapshots:
     dependencies:
      glob: 10.5.0
 
+  robot3@0.4.1: {}
+
   rolldown-plugin-dts@0.18.3(oxc-resolver@11.15.0)(rolldown@1.0.0-beta.53)(typescript@5.9.3):
     dependencies:
       '@babel/generator': 7.28.5
@@ -16506,7 +16549,7 @@ snapshots:
       fdir: 6.5.0(picomatch@4.0.3)
       picomatch: 4.0.3
       postcss: 8.5.6
-      rollup: 4.53.3
+      rollup: 4.55.1
       tinyglobby: 0.2.15
     optionalDependencies:
       '@types/node': 24.10.3

From c8ec11076f0d95bc01a3dd0cc99fba3a866c3eb2 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Tue, 20 Jan 2026 20:52:59 +1100
Subject: [PATCH 2/4] Remove settings.local.json from commits. Only
 settings.json should be committed

---
 .claude/settings.local.json | 21 ---------------------
 1 file changed, 21 deletions(-)
 delete mode 100644 .claude/settings.local.json

diff --git a/.claude/settings.local.json b/.claude/settings.local.json
deleted file mode 100644
index 082c4ee2..00000000
--- a/.claude/settings.local.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-  "permissions": {
-    "allow": [
-      "Bash(pnpm install:*)",
-      "Bash(node -e:*)",
-      "Bash(pnpm start:*)",
-      "Bash(pnpm test:lib:*)",
-      "Bash(pnpm typecheck:*)",
-      "Bash(pnpm build:*)",
-      "Bash(find:*)",
-      "Bash(tree:*)",
-      "WebSearch",
-      "WebFetch(domain:fal.ai)",
-      "mcp__fal__SearchFal"
-    ]
-  },
-  "sandbox": {
-    "enabled": true,
-    "autoAllowBashIfSandboxed": true
-  }
-}

From 42035eb77b6f5038d75212c4447890fc07390da8 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Tue, 20 Jan 2026 21:00:27 +1100
Subject: [PATCH 3/4] Use the API key from the config in preference to env

---
 packages/typescript/ai-fal/src/adapters/image.ts | 4 ++--
 packages/typescript/ai-fal/src/adapters/video.ts | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/packages/typescript/ai-fal/src/adapters/image.ts b/packages/typescript/ai-fal/src/adapters/image.ts
index 1c4ab609..246f0681 100644
--- a/packages/typescript/ai-fal/src/adapters/image.ts
+++ b/packages/typescript/ai-fal/src/adapters/image.ts
@@ -181,7 +181,7 @@ export function createFalImage(
 }
 
 /**
- * Create a fal.ai image adapter using the FAL_KEY environment variable.
+ * Create a fal.ai image adapter using config.apiKey or the FAL_KEY environment variable.
  *
  * The model parameter accepts any fal.ai model ID with full type inference.
  * As you type, you'll get autocomplete for all 600+ supported models.
@@ -207,6 +207,6 @@ export function falImage(
   model: TModel,
   config?: FalImageConfig,
 ): FalImageAdapter<TModel> {
-  const apiKey = getFalApiKeyFromEnv()
+  const apiKey = config?.apiKey ?? getFalApiKeyFromEnv()
   return createFalImage(model, apiKey, config)
 }
diff --git a/packages/typescript/ai-fal/src/adapters/video.ts b/packages/typescript/ai-fal/src/adapters/video.ts
index 5b727723..b1cd51b5 100644
--- a/packages/typescript/ai-fal/src/adapters/video.ts
+++ b/packages/typescript/ai-fal/src/adapters/video.ts
@@ -178,7 +178,7 @@ export function createFalVideo(
 }
 
 /**
- * Create a fal.ai video adapter using the FAL_KEY environment variable.
+ * Create a fal.ai video adapter using config.apiKey or the FAL_KEY environment variable.
  *
  * @experimental Video generation is an experimental feature and may change.
 */
@@ -186,6 +186,6 @@ export function falVideo(
   model: TModel,
   config?: FalVideoConfig,
 ): FalVideoAdapter<TModel> {
-  const apiKey = getFalApiKeyFromEnv()
+  const apiKey = config?.apiKey ?? getFalApiKeyFromEnv()
   return createFalVideo(model, apiKey, config)
 }

From 83d476717f57d7e42e4a74d4c2d19d0d77383464 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Tue, 20 Jan 2026 21:01:38 +1100
Subject: [PATCH 4/4] Moved @tanstack/ai just to peerDependencies.

---
 packages/typescript/ai-fal/package.json | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/packages/typescript/ai-fal/package.json b/packages/typescript/ai-fal/package.json
index 35556de7..4ace418c 100644
--- a/packages/typescript/ai-fal/package.json
+++ b/packages/typescript/ai-fal/package.json
@@ -41,8 +41,7 @@
     "video-generation"
   ],
   "dependencies": {
-    "@fal-ai/client": "^1.8.3",
-    "@tanstack/ai": "workspace:*"
+    "@fal-ai/client": "^1.8.3"
   },
   "devDependencies": {
     "@vitest/coverage-v8": "4.0.14",
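
Editor's note (not part of the patch series): after PATCH 3/4, both factories resolve the key config-first. A minimal sketch of the resulting precedence:

```typescript
import { falImage } from '@tanstack/ai-fal'

// An explicit config.apiKey wins when provided.
const explicit = falImage('fal-ai/flux/dev', { apiKey: 'key-from-config' })

// Otherwise the adapter falls back to the FAL_KEY environment variable
// and throws a descriptive error if neither source is available.
const fromEnv = falImage('fal-ai/flux/dev')
```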