update openrouter models config

This commit is contained in:
duanfuxiang 2025-02-17 15:31:25 +08:00
parent 025dc85c59
commit 8eaafd5e75
7 changed files with 145 additions and 57 deletions

View File

@ -1,6 +1,6 @@
import * as Tooltip from '@radix-ui/react-tooltip'
import { Check, CopyIcon } from 'lucide-react'
import { useMemo, useState } from 'react'
import { useEffect, useState } from 'react'
import { ChatAssistantMessage } from '../../types/chat'
import { calculateLLMCost } from '../../utils/price-calculator'
@ -44,15 +44,23 @@ function CopyButton({ message }: { message: ChatAssistantMessage }) {
}
function LLMResponesInfoButton({ message }: { message: ChatAssistantMessage }) {
const cost = useMemo<number | null>(() => {
if (!message.metadata?.model || !message.metadata?.usage) {
return 0
const [cost, setCost] = useState<number | null>(0);
useEffect(() => {
async function calculateCost() {
if (!message.metadata?.model || !message.metadata?.usage) {
setCost(0);
return;
}
const calculatedCost = await calculateLLMCost({
model: message.metadata.model,
usage: message.metadata.usage,
});
setCost(calculatedCost);
}
return calculateLLMCost({
model: message.metadata.model,
usage: message.metadata.usage,
})
}, [message])
calculateCost();
}, [message]);
return (
<Tooltip.Provider delayDuration={0}>

View File

@ -1,17 +1,31 @@
import * as DropdownMenu from '@radix-ui/react-dropdown-menu'
import { ChevronDown, ChevronUp } from 'lucide-react'
import { useMemo, useState } from 'react'
import { useEffect, useState } from 'react'
import { useSettings } from '../../../contexts/SettingsContext'
import { GetProviderModelIds } from "../../../utils/api"
export function ModelSelect() {
const { settings, setSettings } = useSettings()
const [isOpen, setIsOpen] = useState(false)
const [chatModelId, setChatModelId] = useState(settings.chatModelId)
const [providerModels, setProviderModels] = useState<string[]>([])
const [isLoading, setIsLoading] = useState(true)
const[chatModelId, setChatModelId] = useState(settings.chatModelId)
const currProviderModels = useMemo(() => {
return GetProviderModelIds(settings.chatModelProvider)
useEffect(() => {
const fetchModels = async () => {
setIsLoading(true)
try {
const models = await GetProviderModelIds(settings.chatModelProvider)
setProviderModels(models)
} catch (error) {
console.error('Failed to fetch provider models:', error)
} finally {
setIsLoading(false)
}
}
fetchModels()
}, [settings.chatModelProvider])
return (
@ -29,21 +43,25 @@ export function ModelSelect() {
<DropdownMenu.Content
className="infio-popover">
<ul>
{currProviderModels.map((modelId) => (
<DropdownMenu.Item
key={modelId}
onSelect={() => {
setChatModelId(modelId)
setSettings({
...settings,
chatModelId: modelId,
})
}}
asChild
>
<li>{modelId}</li>
</DropdownMenu.Item>
))}
{isLoading ? (
<li>Loading...</li>
) : (
providerModels.map((modelId) => (
<DropdownMenu.Item
key={modelId}
onSelect={() => {
setChatModelId(modelId)
setSettings({
...settings,
chatModelId: modelId,
})
}}
asChild
>
<li>{modelId}</li>
</DropdownMenu.Item>
))
)}
</ul>
</DropdownMenu.Content>
</DropdownMenu.Portal>

View File

@ -58,14 +58,19 @@ const ControlArea: React.FC<ControlAreaProps> = ({
onModelChange,
isSubmitting,
}) => {
const currProviderModels = useMemo(() => {
return GetProviderModelIds(settings.chatModelProvider)
.map((modelId) => (
<option key={modelId} value={modelId}>
{modelId}
</option>
))
}, [settings])
const [providerModels, setProviderModels] = useState<string[]>([]);
useEffect(() => {
const fetchModels = async () => {
try {
const models = await GetProviderModelIds(settings.chatModelProvider);
setProviderModels(models);
} catch (error) {
console.error("Failed to fetch provider models:", error);
}
};
fetchModels();
}, [settings]);
return (
<div className="infio-ai-block-controls">
@ -75,7 +80,11 @@ const ControlArea: React.FC<ControlAreaProps> = ({
onChange={(e) => onModelChange(e.target.value)}
disabled={isSubmitting}
>
{currProviderModels}
{providerModels.map((modelId) => (
<option key={modelId} value={modelId}>
{modelId}
</option>
))}
</select>
<button
className="infio-ai-block-submit-button"

View File

@ -34,7 +34,7 @@ class LLMClient {
async queryChatModel(messages: RequestMessage[]): Promise<Result<string, Error>> {
const data = await this.llm.generateResponse(this.model, {
model: this.model.name,
model: this.model.modelId,
messages: messages,
stream: false,
})

View File

@ -170,9 +170,19 @@ export const ComboBoxComponent: React.FC<ComboBoxComponentProps> = ({
const providers = isEmbedding ? GetEmbeddingProviders() : GetAllProviders()
const modelIds = useMemo(() => {
return isEmbedding ? GetEmbeddingProviderModelIds(modelProvider) : GetProviderModelIds(modelProvider)
}, [modelProvider])
const [modelIds, setModelIds] = useState<string[]>([]);
// Replace useMemo with useEffect for async fetching
useEffect(() => {
const fetchModelIds = async () => {
const ids = isEmbedding
? GetEmbeddingProviderModelIds(modelProvider)
: await GetProviderModelIds(modelProvider);
setModelIds(ids);
};
fetchModelIds();
}, [modelProvider, isEmbedding]);
const searchableItems = useMemo(() => {
return modelIds.map((id) => ({
@ -182,8 +192,8 @@ export const ComboBoxComponent: React.FC<ComboBoxComponentProps> = ({
}, [modelIds])
// 初始化 fuse用于模糊搜索简单配置 threshold 可按需调整
const fuse = useMemo(() => {
return new Fuse(searchableItems, {
const fuse: Fuse<SearchableItem> = useMemo(() => {
return new Fuse<SearchableItem>(searchableItems, {
keys: ["html"],
threshold: 0.6,
shouldSort: true,
@ -200,7 +210,7 @@ export const ComboBoxComponent: React.FC<ComboBoxComponentProps> = ({
? highlight(fuse.search(searchTerm))
: searchableItems.map(item => ({
...item,
html: [{ text: item.html, isHighlighted: false }]
html: typeof item.html === 'string' ? [{ text: item.html, isHighlighted: false }] : item.html
}))
return results
}, [searchableItems, searchTerm, fuse])

View File

@ -1,3 +1,4 @@
import { OPENROUTER_BASE_URL } from '../constants'
import { ApiProvider } from '../types/llm/model'
export interface ModelInfo {
@ -1143,13 +1144,50 @@ export const GetEmbeddingProviders = (): ApiProvider[] => {
ApiProvider.AlibabaQwen
]
}
// Process-lifetime memo of the OpenRouter catalog; null until first successful fetch.
let openRouterModelsCache: Record<string, ModelInfo> | null = null;

/**
 * Fetch the OpenRouter model catalog and adapt it to our ModelInfo shape.
 *
 * The result is cached in `openRouterModelsCache` so subsequent calls do not
 * hit the network. On any failure (network error or non-2xx response) the
 * built-in default model is returned as a fallback and the failure is NOT
 * cached, so a later call can retry.
 *
 * @returns map of model id -> ModelInfo
 */
async function fetchOpenRouterModels(): Promise<Record<string, ModelInfo>> {
	if (openRouterModelsCache) {
		return openRouterModelsCache;
	}
	try {
		const response = await fetch(OPENROUTER_BASE_URL + "/models");
		// fetch() only rejects on network failures; surface HTTP errors
		// explicitly so a 4xx/5xx body is never parsed (and cached) as a catalog.
		if (!response.ok) {
			throw new Error(`OpenRouter /models returned HTTP ${response.status}`);
		}
		const data = await response.json();
		const models: Record<string, ModelInfo> = {};
		if (data?.data) {
			for (const model of data.data) {
				models[model.id] = {
					maxTokens: model.top_provider?.max_completion_tokens ?? model.context_length,
					contextWindow: model.context_length,
					supportsImages: model.architecture?.modality?.includes("image") ?? false,
					supportsPromptCache: false,
					// OpenRouter serializes pricing as decimal strings (e.g. "0.000001");
					// coerce to numbers so downstream cost arithmetic stays numeric.
					inputPrice: Number(model.pricing?.prompt ?? 0),
					outputPrice: Number(model.pricing?.completion ?? 0),
					description: model.description,
				};
			}
		}
		openRouterModelsCache = models;
		return models;
	} catch (error) {
		console.error('Failed to fetch OpenRouter models:', error);
		return {
			[openRouterDefaultModelId]: openRouterDefaultModelInfo
		};
	}
}
// Get all models for a provider
export const GetProviderModels = (provider: ApiProvider): Record<string, ModelInfo> => {
export const GetProviderModels = async (provider: ApiProvider): Promise<Record<string, ModelInfo>> => {
switch (provider) {
case ApiProvider.Infio:
return infioModels
case ApiProvider.OpenRouter:
return {}
return await fetchOpenRouterModels()
case ApiProvider.OpenAI:
return openAiNativeModels
case ApiProvider.AlibabaQwen:
@ -1172,7 +1210,16 @@ export const GetProviderModels = (provider: ApiProvider): Record<string, ModelIn
return {}
}
}
// Get all models for a provider
// Get all model ids for a provider
export const GetProviderModelIds = async (provider: ApiProvider): Promise<string[]> => {
const models = await GetProviderModels(provider)
return Object.keys(models)
}
/// Embedding models
// Get all embedding models for a provider
export const GetEmbeddingProviderModels = (provider: ApiProvider): Record<string, EmbeddingModelInfo> => {
switch (provider) {
case ApiProvider.Google:
@ -1187,15 +1234,11 @@ export const GetEmbeddingProviderModels = (provider: ApiProvider): Record<string
return {}
}
}
// Get all model ids for a provider
export const GetProviderModelIds = (provider: ApiProvider): string[] => {
return Object.keys(GetProviderModels(provider))
}
// List the ids of every embedding model a provider exposes.
export const GetEmbeddingProviderModelIds = (provider: ApiProvider): string[] => {
	const embeddingModels = GetEmbeddingProviderModels(provider);
	return Object.keys(embeddingModels);
}
// Get embedding model info for a provider and model id
export const GetEmbeddingModelInfo = (provider: ApiProvider, modelId: string): EmbeddingModelInfo => {
const models = GetEmbeddingProviderModels(provider)
return models[modelId]

View File

@ -4,14 +4,14 @@ import { ResponseUsage } from '../types/llm/response'
import { GetProviderModels } from './api'
// Returns the cost in dollars. Returns null if the model is not supported.
export const calculateLLMCost = ({
export const calculateLLMCost = async ({
model,
usage,
}: {
model: LLMModel
usage: ResponseUsage
}): number | null => {
const providerModels = GetProviderModels(model.provider)
}): Promise<number | null> => {
const providerModels = await GetProviderModels(model.provider)
if (!providerModels) {
return null
}