// Agent-n8n/n8n-n8n-1.109.2/packages/@n8n/ai-workflow-builder.ee/src/llm-config.ts

// This file defines its own LLM config type, specific to LLM providers
// (distinct from other LLMConfig types elsewhere in the codebase).
interface LLMProviderConfig {
	apiKey: string;
	baseUrl?: string;
	headers?: Record<string, string>;
}
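
// Example (illustrative, not from the original file): routing requests
// through a proxy with extra headers. The URL and header values below
// are assumptions for the sketch.
//
//   const config: LLMProviderConfig = {
//   	apiKey: process.env.OPENAI_API_KEY ?? '',
//   	baseUrl: 'https://llm-proxy.internal.example/v1',
//   	headers: { 'x-team': 'ai-workflow-builder' },
//   };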

// Each factory imports its provider SDK lazily, so the dependency is only
// loaded when that model is actually requested.
export const o4mini = async (config: LLMProviderConfig) => {
	const { ChatOpenAI } = await import('@langchain/openai');
	// o4-mini is a reasoning model, so no sampling parameters are set.
	return new ChatOpenAI({
		model: 'o4-mini-2025-04-16',
		apiKey: config.apiKey,
		configuration: {
			baseURL: config.baseUrl,
			defaultHeaders: config.headers,
		},
	});
};

export const gpt41mini = async (config: LLMProviderConfig) => {
	const { ChatOpenAI } = await import('@langchain/openai');
	return new ChatOpenAI({
		model: 'gpt-4.1-mini-2025-04-14',
		apiKey: config.apiKey,
		temperature: 0, // deterministic output
		maxTokens: -1,
		configuration: {
			baseURL: config.baseUrl,
			defaultHeaders: config.headers,
		},
	});
};

export const gpt41 = async (config: LLMProviderConfig) => {
	const { ChatOpenAI } = await import('@langchain/openai');
	return new ChatOpenAI({
		model: 'gpt-4.1-2025-04-14',
		apiKey: config.apiKey,
		temperature: 0.3, // slightly more varied output than the mini variant
		maxTokens: -1,
		configuration: {
			baseURL: config.baseUrl,
			defaultHeaders: config.headers,
		},
	});
};

export const anthropicClaudeSonnet4 = async (config: LLMProviderConfig) => {
	const { ChatAnthropic } = await import('@langchain/anthropic');
	return new ChatAnthropic({
		model: 'claude-sonnet-4-20250514',
		apiKey: config.apiKey,
		temperature: 0,
		maxTokens: 16000, // Anthropic requires an explicit output-token cap
		// Anthropic takes the base URL and headers via provider-specific options.
		anthropicApiUrl: config.baseUrl,
		clientOptions: {
			defaultHeaders: config.headers,
		},
	});
};
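
// Usage sketch (illustrative, not part of the original module): how a caller
// might construct one of these models and run a prompt. The environment
// variable name is an assumption.
//
//   const model = await gpt41mini({ apiKey: process.env.OPENAI_API_KEY ?? '' });
//   const reply = await model.invoke('Summarize this workflow in one sentence.');
//   console.log(reply.content);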