update infio api
parent 5bdfc91042
commit d30c118ae2
@@ -4,7 +4,7 @@ import { URL } from 'url'
 import { GoogleGenerativeAI } from '@google/generative-ai'
 import { OpenAI } from 'openai'
 
-import { ALIBABA_QWEN_BASE_URL, OPENAI_BASE_URL, SILICONFLOW_BASE_URL } from "../../constants"
+import { ALIBABA_QWEN_BASE_URL, INFIO_BASE_URL, OPENAI_BASE_URL, SILICONFLOW_BASE_URL } from "../../constants"
 import { EmbeddingModel } from '../../types/embedding'
 import { ApiProvider } from '../../types/llm/model'
 import { InfioSettings } from '../../types/settings'
@@ -20,6 +20,42 @@ export const getEmbeddingModel = (
 	settings: InfioSettings,
 ): EmbeddingModel => {
 	switch (settings.embeddingModelProvider) {
+		case ApiProvider.Infio: {
+			const openai = new OpenAI({
+				apiKey: settings.infioProvider.apiKey,
+				baseURL: INFIO_BASE_URL,
+				dangerouslyAllowBrowser: true,
+			})
+			const modelInfo = GetEmbeddingModelInfo(settings.embeddingModelProvider, settings.embeddingModelId)
+			return {
+				id: settings.embeddingModelId,
+				dimension: modelInfo.dimensions,
+				getEmbedding: async (text: string) => {
+					try {
+						if (!openai.apiKey) {
+							throw new LLMAPIKeyNotSetException(
+								'OpenAI API key is missing. Please set it in settings menu.',
+							)
+						}
+						const embedding = await openai.embeddings.create({
+							model: settings.embeddingModelId,
+							input: text,
+						})
+						return embedding.data[0].embedding
+					} catch (error) {
+						if (
+							error.status === 429 &&
+							error.message.toLowerCase().includes('rate limit')
+						) {
+							throw new LLMRateLimitExceededException(
+								'OpenAI API rate limit exceeded. Please try again later.',
+							)
+						}
+						throw error
+					}
+				},
+			}
+		}
 		case ApiProvider.OpenAI: {
 			const baseURL = settings.openaiProvider.useCustomUrl ? settings.openaiProvider.baseUrl : OPENAI_BASE_URL
 			const openai = new OpenAI({
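For context, a minimal usage sketch of the new ApiProvider.Infio branch added above (the module path, placeholder API key, and settings literal are assumptions for illustration, not part of this commit):

import { ApiProvider } from '../../types/llm/model'
import { InfioSettings } from '../../types/settings'
import { getEmbeddingModel } from './embed' // assumed path of the file patched above

// Only the fields the new Infio case actually reads are sketched here.
const settings = {
	embeddingModelProvider: ApiProvider.Infio,
	embeddingModelId: 'openai/text-embedding-3-small',
	infioProvider: { apiKey: '<your-infio-api-key>' },
} as unknown as InfioSettings

const model = getEmbeddingModel(settings)
// dimension is resolved via GetEmbeddingModelInfo (1536 for text-embedding-3-small)
const vector = await model.getEmbedding('hello infio')
console.log(model.id, model.dimension, vector.length)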
@@ -188,6 +188,26 @@ async function fetchInfioModels(apiKey?: string): Promise<Record<string, ModelIn
 	}
 }
 
+export const infioEmbeddingModels = {
+	"openai/text-embedding-3-small": {
+		dimensions: 1536,
+		description: "Increased performance over 2nd generation ada embedding model"
+	},
+	"gemini/gemini-embedding-exp-03-07": {
+		dimensions: 1024,
+		description: "Most capable 2nd generation embedding model, replacing 16 first generation models"
+	},
+	"deepseek/embedding-large-text": {
+		dimensions: 1024,
+		description: "Most capable embedding model for both English and non-English tasks"
+	},
+	"deepseek/embedding-text": {
+		dimensions: 512,
+		description: "Most capable embedding model for both English and non-English tasks"
+	}
+} as const satisfies Record<string, EmbeddingModelInfo>
+
+
 // OpenRouter
 // https://openrouter.ai/models?order=newest&supported_parameters=tools
 export const openRouterDefaultModelId = "anthropic/claude-sonnet-4" // will always exist in openRouterModels
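For context, a small sketch of how the new infioEmbeddingModels table can be read, e.g. to resolve the vector size for a configured model id (module path assumed; EmbeddingModelInfo is the { dimensions, description } shape used above):

import { infioEmbeddingModels } from './models' // assumed path of the file patched above

// Look up the vector size and description for one of the new Infio embedding model ids.
const modelId = 'gemini/gemini-embedding-exp-03-07'
const info = infioEmbeddingModels[modelId]
console.log(`${modelId} -> ${info.dimensions} dimensions: ${info.description}`)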
@@ -1593,6 +1613,7 @@ export const GetAllProviders = (): ApiProvider[] => {
 
 export const GetEmbeddingProviders = (): ApiProvider[] => {
 	return [
+		ApiProvider.Infio,
 		ApiProvider.OpenAI,
 		ApiProvider.SiliconFlow,
 		ApiProvider.Google,
@@ -1684,6 +1705,8 @@ export const GetProviderModelIds = async (provider: ApiProvider, settings?: Infi
 // Get all embedding models for a provider
 export const GetEmbeddingProviderModels = (provider: ApiProvider): Record<string, EmbeddingModelInfo> => {
 	switch (provider) {
+		case ApiProvider.Infio:
+			return infioEmbeddingModels
 		case ApiProvider.Google:
 			return geminiEmbeddingModels
 		case ApiProvider.SiliconFlow:
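Taken together, the two lookups patched above let a caller discover Infio embedding models before constructing one; a hedged sketch (module path assumed, wiring illustrative, function names as in the diff):

import { ApiProvider } from '../../types/llm/model'
import { GetEmbeddingProviders, GetEmbeddingProviderModels } from './models' // assumed path

// Infio now appears in the embedding provider list, and its model table is
// served by the new switch case in GetEmbeddingProviderModels.
const providers = GetEmbeddingProviders()
if (providers.includes(ApiProvider.Infio)) {
	for (const [id, info] of Object.entries(GetEmbeddingProviderModels(ApiProvider.Infio))) {
		console.log(id, info.dimensions) // e.g. "openai/text-embedding-3-small" 1536
	}
}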