From e49265c9c179617da2c6d5a229ba12b5eceb1ffb Mon Sep 17 00:00:00 2001
From: duanfuxiang
Date: Sun, 13 Apr 2025 15:21:28 +0800
Subject: [PATCH] update groq

---
 src/core/llm/manager.ts |  5 -----
 src/utils/api.ts        | 24 ++++++++++++++++++++++++
 2 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/src/core/llm/manager.ts b/src/core/llm/manager.ts
index 74dc4ee..bdaac2d 100644
--- a/src/core/llm/manager.ts
+++ b/src/core/llm/manager.ts
@@ -78,11 +78,6 @@ class LLMManager implements LLMManagerInterface {
     this.anthropicProvider = new AnthropicProvider(settings.anthropicProvider.apiKey)
     this.googleProvider = new GeminiProvider(settings.googleProvider.apiKey)
     this.groqProvider = new GroqProvider(settings.groqProvider.apiKey)
-    console.log('GrokProvider',
-      settings.grokProvider.apiKey,
-      settings.grokProvider.baseUrl,
-      settings.grokProvider.useCustomUrl
-    )
     this.grokProvider = new OpenAICompatibleProvider(settings.grokProvider.apiKey,
       settings.grokProvider.baseUrl && settings.grokProvider.useCustomUrl
         ? settings.grokProvider.baseUrl
diff --git a/src/utils/api.ts b/src/utils/api.ts
index 9061b2d..9c21728 100644
--- a/src/utils/api.ts
+++ b/src/utils/api.ts
@@ -1034,6 +1034,30 @@ export const siliconFlowEmbeddingModels = {
 export type GroqModelId = keyof typeof groqModels
 export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile"
 export const groqModels = {
+  "meta-llama/llama-4-scout-17b-16e-instruct": {
+    maxTokens: 8192,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+  },
+  "meta-llama/llama-4-maverick-17b-128e-instruct": {
+    maxTokens: 8192,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+  },
+  "qwen-qwq-32b": {
+    maxTokens: 8192,
+    contextWindow: 131072,
+    supportsImages: false,
+    supportsPromptCache: false,
+    inputPrice: 0,
+    outputPrice: 0,
+  },
   "llama-3.2-1b-preview": {
     maxTokens: 4096,
     contextWindow: 8192,