// LLM provider configuration, distinct from the app-level LLMConfig type:
// this one carries only what the provider clients need.
interface LLMProviderConfig {
  apiKey: string;
  baseUrl?: string;
  headers?: Record<string, string>;
}

export const o4mini = async (config: LLMProviderConfig) => {
  // Dynamic import keeps @langchain/openai out of the bundle until a model is requested.
  const { ChatOpenAI } = await import('@langchain/openai');
  // Reasoning model: no temperature is set, as o-series models do not accept one.
  return new ChatOpenAI({
    model: 'o4-mini-2025-04-16',
    apiKey: config.apiKey,
    configuration: {
      baseURL: config.baseUrl,
      defaultHeaders: config.headers,
    },
  });
};

export const gpt41mini = async (config: LLMProviderConfig) => {
  const { ChatOpenAI } = await import('@langchain/openai');
  return new ChatOpenAI({
    model: 'gpt-4.1-mini-2025-04-14',
    apiKey: config.apiKey,
    temperature: 0,
    maxTokens: -1,
    configuration: {
      baseURL: config.baseUrl,
      defaultHeaders: config.headers,
    },
  });
};

export const gpt41 = async (config: LLMProviderConfig) => {
  const { ChatOpenAI } = await import('@langchain/openai');
  return new ChatOpenAI({
    model: 'gpt-4.1-2025-04-14',
    apiKey: config.apiKey,
    temperature: 0.3,
    maxTokens: -1,
    configuration: {
      baseURL: config.baseUrl,
      defaultHeaders: config.headers,
    },
  });
};

export const anthropicClaudeSonnet4 = async (config: LLMProviderConfig) => {
  const { ChatAnthropic } = await import('@langchain/anthropic');
  return new ChatAnthropic({
    model: 'claude-sonnet-4-20250514',
    apiKey: config.apiKey,
    temperature: 0,
    maxTokens: 16000,
    anthropicApiUrl: config.baseUrl,
    clientOptions: {
      defaultHeaders: config.headers,
    },
  });
};
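
// Example usage (an illustrative sketch, not part of this module's API;
// assumes an OPENAI_API_KEY variable is set in the environment):
//
//   const model = await gpt41({ apiKey: process.env.OPENAI_API_KEY! });
//   const reply = await model.invoke('Summarize this repo in one sentence.');
//   console.log(reply.content);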